Hello Heitor

I've made some changes that could help you, but I guess they will only work
in MySQL, and I don't know whether they could have some other performance impact.

1st) Create a function in MySQL to convert bytes to a human-readable format
and run these commands:
   mysql -u root -D bacula <
/tmp/function_convert_bytes_to_humanreadable.sql
   mysql -u root -D bacula -e "GRANT SELECT ON mysql.proc to 'bacula';"

2nd) As you have changed the Bacula database and created a new function, it
is necessary to change the catalog backup script:
/etc/bacula/scripts/make_catalog_backup.pl
   before: exec("HOME='$wd' mysqldump -f --opt $args{db_name} >
'$wd/$args{db_name}.sql'");
   after:   exec("HOME='$wd' mysqldump -f --routines --triggers --opt
$args{db_name} > '$wd/$args{db_name}.sql'");

3rd) Apply the patches:
 patch /root/bacula-7.4.0/src/cats/sql_list.c  sql_list.c.patch
 patch /root/bacula-7.4.0/src/cats/sql_cmds.c  sql_cmds.c.patch


Another thing that could be done is just create the function and change
sample_query.sql
Example: query nº 10
# 10
:List total files/bytes by Job
SELECT count(*) AS Jobs,sum(JobFiles) AS Files,sum(JobBytes) AS Bytes,Name
AS Job
 FROM Job GROUP by Name;

change to
# 10
:List total files/bytes by Job
SELECT count(*) AS Jobs,sum(JobFiles) AS
Files,convert_bytes_to_humanreadable(sum(JobBytes)) AS Bytes,Name AS Job
 FROM Job GROUP by Name;


Some command outputs (regarding ExpiresIn); there's not enough space on
the screen.

*list media pool=Mensal
Automatically selected Catalog: MyCatalog
Using Catalog "MyCatalog"
*list media pool=Mensal
+---------+--------------------+-----------+---------+----------+----------+--------------+---------+------+-----------+-----------+---------------------+---------------------+
| MediaId | VolumeName         | VolStatus | Enabled | VolBytes | VolFiles
| VolRetention | Recycle | Slot | InChanger | MediaType | LastWritten
  | ExpiresIn           |
+---------+--------------------+-----------+---------+----------+----------+--------------+---------+------+-----------+-----------+---------------------+---------------------+
|      46 | Volume-Mensal-0001 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-06-09
15:30:57 | 2016-06-08 15:30:57 |
|      47 | Volume-Mensal-0002 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-06-09
15:43:32 | 2016-06-08 15:43:32 |
|      48 | Volume-Mensal-0003 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-06-09
16:07:09 | 2016-06-08 16:07:09 |
|      49 | Volume-Mensal-0004 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-06-09
16:23:25 | 2016-06-08 16:23:25 |
|      50 | Volume-Mensal-0005 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-07-03
17:14:34 | 2016-07-02 17:14:34 |
|     101 | Volume-Mensal-0006 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-07-03
22:18:27 | 2016-07-02 22:18:27 |
|     102 | Volume-Mensal-0007 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-07-03
22:31:50 | 2016-07-02 22:31:50 |
|     103 | Volume-Mensal-0008 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-07-03
22:44:58 | 2016-07-02 22:44:58 |
|     104 | Volume-Mensal-0009 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-07-03
22:53:59 | 2016-07-02 22:53:59 |
|     105 | Volume-Mensal-0010 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-08-07
21:30:30 | 2016-08-06 21:30:30 |
|     123 | Volume-Mensal-0011 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-08-07
21:41:30 | 2016-08-06 21:41:30 |
|     124 | Volume-Mensal-0012 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-08-07
21:53:46 | 2016-08-06 21:53:46 |
|     125 | Volume-Mensal-0013 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-08-07
22:04:13 | 2016-08-06 22:04:13 |
|     126 | Volume-Mensal-0014 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-08-07
22:16:48 | 2016-08-06 22:16:48 |
|     127 | Volume-Mensal-0015 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-09-04
21:31:21 | 2016-09-03 21:31:21 |
|     129 | Volume-Mensal-0016 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-09-04
21:41:57 | 2016-09-03 21:41:57 |
|     130 | Volume-Mensal-0017 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-09-04
21:54:32 | 2016-09-03 21:54:32 |
|     131 | Volume-Mensal-0018 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-09-04
22:04:59 | 2016-09-03 22:04:59 |
|     132 | Volume-Mensal-0019 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-09-04
22:17:00 | 2016-09-03 22:17:00 |
|     133 | Volume-Mensal-0020 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-10-02
21:31:24 | 2016-10-01 21:31:24 |
|     155 | Volume-Mensal-0021 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-10-02
21:41:54 | 2016-10-01 21:41:54 |
|     156 | Volume-Mensal-0022 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-10-02
21:54:02 | 2016-10-01 21:54:02 |
|     157 | Volume-Mensal-0023 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-10-02
22:05:11 | 2016-10-01 22:05:11 |
|     158 | Volume-Mensal-0024 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-10-02
22:13:17 | 2016-10-01 22:13:17 |
|     159 | Volume-Mensal-0025 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-11-06
17:11:17 | 2016-11-05 17:11:17 |
|     160 | Volume-Mensal-0026 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-11-06
21:35:14 | 2016-11-05 21:35:14 |
|     161 | Volume-Mensal-0027 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-11-06
21:46:54 | 2016-11-05 21:46:54 |
|     162 | Volume-Mensal-0028 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-11-06
21:59:06 | 2016-11-05 21:59:06 |
|     163 | Volume-Mensal-0029 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-11-06
22:09:46 | 2016-11-05 22:09:46 |
|     164 | Volume-Mensal-0030 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-11-06
22:19:29 | 2016-11-05 22:19:29 |
|     165 | Volume-Mensal-0031 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-12-04
17:16:20 | 2016-12-03 17:16:20 |
|     166 | Volume-Mensal-0032 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-12-04
21:39:09 | 2016-12-03 21:39:09 |
|     167 | Volume-Mensal-0033 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-12-04
21:51:32 | 2016-12-03 21:51:32 |
|     168 | Volume-Mensal-0034 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-12-04
22:02:34 | 2016-12-03 22:02:34 |
|     169 | Volume-Mensal-0035 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-12-04
22:12:04 | 2016-12-03 22:12:04 |
|     170 | Volume-Mensal-0036 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2015-12-04
22:25:17 | 2016-12-03 22:25:17 |
|     171 | Volume-Mensal-0037 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2016-01-01
21:34:45 | 2016-12-31 21:34:45 |
|     172 | Volume-Mensal-0038 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2016-01-01
21:46:14 | 2016-12-31 21:46:14 |
|     173 | Volume-Mensal-0039 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2016-01-01
21:58:05 | 2016-12-31 21:58:05 |
|     174 | Volume-Mensal-0040 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2016-01-01
22:08:39 | 2016-12-31 22:08:39 |
|     175 | Volume-Mensal-0041 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2016-01-01
22:17:10 | 2016-12-31 22:17:10 |
|     176 | Volume-Mensal-0042 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2016-02-05
17:13:11 | 2017-02-04 17:13:11 |
|     177 | Volume-Mensal-0043 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2016-02-05
21:38:39 | 2017-02-04 21:38:39 |
|     178 | Volume-Mensal-0044 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2016-02-05
21:56:40 | 2017-02-04 21:56:40 |
|     179 | Volume-Mensal-0045 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2016-02-05
22:17:27 | 2017-02-04 22:17:27 |
|     180 | Volume-Mensal-0046 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2016-02-05
22:35:27 | 2017-02-04 22:35:27 |
|     181 | Volume-Mensal-0047 | Full      |       1 | 3.73 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2016-02-05
22:54:48 | 2017-02-04 22:54:48 |
|     182 | Volume-Mensal-0048 | Append    |       1 | 1.36 GB  |        0
|   31,536,000 |       1 |    0 |         0 | File      | 2016-02-05
23:05:01 | 2017-02-04 23:05:01 |
+---------+--------------------+-----------+---------+----------+----------+--------------+---------+------+-----------+-----------+---------------------+---------------------+



*list job=Backup_Servidor_Bacula limit=5
+-------+------------------------+---------------------+------+-------+----------+----------+-----------+
| JobId | Name                   | StartTime           | Type | Level |
JobFiles | JobBytes | JobStatus |
+-------+------------------------+---------------------+------+-------+----------+----------+-----------+
|   858 | Backup_Servidor_Bacula | 2015-06-05 23:05:02 | B    | F     |
 4,349 | 35.23 MB | T         |
| 1,085 | Backup_Servidor_Bacula | 2015-07-03 22:00:00 | B    | F     |
 4,315 | 71.46 MB | T         |
| 1,344 | Backup_Servidor_Bacula | 2015-08-07 21:30:00 | B    | F     |
 4,308 | 39.97 MB | T         |
| 1,581 | Backup_Servidor_Bacula | 2015-09-04 22:22:05 | B    | F     |
 4,354 | 41.06 MB | T         |
| 1,896 | Backup_Servidor_Bacula | 2015-10-02 22:26:02 | B    | F     |
 4,373 | 58.04 MB | T         |
+-------+------------------------+---------------------+------+-------+----------+----------+-----------+


*list pool
+--------+--------------+---------+---------+-------------+--------------+---------+----------+-----------------------------------------------+
| PoolId | Name         | NumVols | MaxVols | MaxVolBytes | VolRetention |
Enabled | PoolType | LabelFormat                                   |
+--------+--------------+---------+---------+-------------+--------------+---------+----------+-----------------------------------------------+
|      2 | Scratch      |       0 |       0 | 0.00 Bytes  |   31,536,000 |
      1 | Backup   | *                                             |
|      6 | Diaria       |      15 |      15 | 3.73 GB     |   31,536,000 |
      1 | Backup   | Volume-Diario-${NumVolsDiaria+:p/4/0/r}       |
|      7 | Semanal      |      25 |      25 | 3.73 GB     |   31,536,000 |
      1 | Backup   | Volume-Semanal-${NumVolsSemanal+:p/4/0/r}     |
|      8 | Mensal       |      48 |      70 | 3.73 GB     |   31,536,000 |
      1 | Backup   | Volume-Mensal-${NumVolsMensal+:p/4/0/r}       |
|     11 | Pool-Temp    |       0 |     100 | 9.31 GB     |   31,536,000 |
      1 | Backup   | Volume-Temporario-${NumVolsPoolTemp+:p/4/0/r} |
|     13 | VM           |      20 |      20 | 9.31 GB     |   31,536,000 |
      1 | Backup   | Volume-VM-${NumVolsVM+:p/4/0/r}               |
|     14 | Pool-Default |       0 |     100 | 9.31 GB     |   31,536,000 |
      1 | Backup   | Volume-Default-${NumVolsPoolDefault+:p/4/0/r} |
+--------+--------------+---------+---------+-------------+--------------+---------+----------+-----------------------------------------------+


*list jobtotals
+------+-----------+-----------+-----------------------------+
| Jobs | Files     | Bytes     | Job                         |
+------+-----------+-----------+-----------------------------+
|   39 |        54 | 3.33 GB   | Backup_Catalogo             |
|   48 |    81,062 | 3.40 GB   | Backup_Maquina_001          |
|   35 |   224,238 | 9.51 GB   | Backup_Maquina_002          |
|   37 |   179,261 | 7.10 GB   | Backup_Maquina_003          |
|   67 |       201 | 29.70 GB  | Backup_Banco_Dump           |
|   34 |        99 | 44.09 GB  | Backup_Banco_Arquivo        |
|   36 |   111,750 | 1.58 GB   | Backup_Servidor_Bacula      |
|   35 |   187,562 | 3.22 GB   | Backup_Servidor_Firewall    |
|   31 |       444 | 66.24 MB  | Backup_Servidor_Windows001  |
|   36 |   458,804 | 8.33 GB   | Backup_Servidor_Intranet    |
|   35 | 2,386,115 | 206.08 GB | Backup_Servidor_Windows002  |
|    2 |         8 | 11.62 GB  | Backup_VM_001               |
|    8 |        24 | 36.83 GB  | Backup_VM_Servidor_AD       |
|    7 |        24 | 56.98 GB  | Backup_VM_Servidor_002      |
|    9 |        28 | 78.08 GB  | Backup_VM_Servidor_003      |
|   38 |   415,459 | 157.80 GB | Restore_Files               |
+------+-----------+-----------+-----------------------------+
+------+-----------+-----------+
| Jobs | Files     | Bytes     |
+------+-----------+-----------+
|  497 | 4,045,133 | 657.72 GB |
+------+-----------+-----------+

Atenciosamente

*Nome **|* *Wanderlei Hüttel*
*Blog*  | http://www.huttel.com.br



2016-02-16 19:20 GMT-02:00 Dan Langille <d...@langille.org>:

> > On Feb 16, 2016, at 3:59 PM, Heitor Faria <hei...@bacula.com.br> wrote:
> >
> >>>> Hello Kern and Heitor,
> >>
> >>> I really like the new "expiresin" field, it is quite helpful.
> >>>
> >>> I agree with Ana about the formatting of it though - Even though we are
> >>> pushing the envelope with regards to the column width of the "list
> media"
> >>> output. :)
> >
> > Hello, Bill: that´s the beauty of human readable output. The way data is
> growing only a 50 inches monitor would be able to display list media output
> with volume size in bytes and retention in seconds. Please, support me. =)
>
>
> Consider also that other programs want the output.  Having raw output (ie.
> no commas) is also useful.
>
> --
> Dan Langille - BSDCan / PGCon
> d...@langille.org
>
>
>
>
>
>
> ------------------------------------------------------------------------------
> Site24x7 APM Insight: Get Deep Visibility into Application Performance
> APM + Mobile APM + RUM: Monitor 3 App instances at just $35/Month
> Monitor end-to-end web transactions and take corrective actions now
> Troubleshoot faster and improve end-user experience. Signup Now!
> http://pubads.g.doubleclick.net/gampad/clk?id=272487151&iu=/4140
> _______________________________________________
> Bacula-users mailing list
> Bacula-users@lists.sourceforge.net
> https://lists.sourceforge.net/lists/listinfo/bacula-users
>
/*
   Bacula(R) - The Network Backup Solution

   Copyright (C) 2000-2015 Kern Sibbald
   Copyright (C) 2000-2014 Free Software Foundation Europe e.V.

   The original author of Bacula is Kern Sibbald, with contributions
   from many others, a complete list can be found in the file AUTHORS.

   You may use this file and others of this release according to the
   license defined in the LICENSE file, which includes the Affero General
   Public License, v3.0 ("AGPLv3") and some additional permissions and
   terms pursuant to its AGPLv3 Section 7.

   This notice must be preserved when any source code is 
   conveyed and/or propagated.

   Bacula(R) is a registered trademark of Kern Sibbald.
*/
/*
 *  This file contains all the SQL commands that are either issued by
 *   the Director or which are database backend specific.
 *
 *     Written by Kern Sibbald, July MMII
 */
/*
 * Note, PostgreSQL imposes some constraints on using DISTINCT and GROUP BY
 *  for example, the following is illegal in PostgreSQL:
 *  SELECT DISTINCT JobId FROM temp ORDER BY StartTime ASC;
 *  because all the ORDER BY expressions must appear in the SELECT list!
 */

#include "bacula.h"

/* Fetch the restore objects (plugin data) for the JobIds in the
 * comma-separated list %s, limited to object type %d, ordered by
 * ObjectIndex ascending. */
const char *get_restore_objects =
   "SELECT JobId,ObjectLength,ObjectFullLength,ObjectIndex,"
   "ObjectType,ObjectCompression,FileIndex,ObjectName,"
   "RestoreObject,PluginName "
   "FROM RestoreObject WHERE JobId IN (%s) "
   "AND ObjectType = %d ORDER BY ObjectIndex ASC";

/* Repair catalog state for jobs that never ran or never finished:
 * status 'C' (created) and 'R' (running) rows become 'f', with their
 * times normalized.  NOTE(review): presumably executed at Director
 * startup -- confirm against callers. */
const char *cleanup_created_job =
   "UPDATE Job SET JobStatus='f', "
   "StartTime=SchedTime, EndTime=SchedTime "
   "WHERE JobStatus = 'C'";
const char *cleanup_running_job =
   "UPDATE Job SET JobStatus='f', EndTime=StartTime "
   "WHERE JobStatus = 'R'";

/* For sql_update.c db_update_stats */
/* Archive finished Job rows (status T, W, f, A or E) whose JobTDate is
 * below %s into JobHisto, skipping JobIds already present there.
 * NOTE(review): the sub-select compares "JobHisto.Jobid" (lowercase d);
 * works where identifiers are case-insensitive -- confirm intentional. */
const char *fill_jobhisto =
        "INSERT INTO JobHisto (JobId, Job, Name, Type, Level,"
           "ClientId, JobStatus,"
           "SchedTime, StartTime, EndTime, RealEndTime, JobTDate,"
           "VolSessionId, VolSessionTime, JobFiles, JobBytes, ReadBytes,"
           "JobErrors, JobMissingFiles, PoolId, FileSetId, PriorJobId,"
           "PurgedFiles, HasBase, Reviewed, Comment)"
        "SELECT JobId, Job, Name, Type, Level, ClientId, JobStatus,"
           "SchedTime, StartTime, EndTime, RealEndTime, JobTDate,"
           "VolSessionId, VolSessionTime, JobFiles, JobBytes, ReadBytes,"
           "JobErrors, JobMissingFiles, PoolId, FileSetId, PriorJobId,"
           "PurgedFiles, HasBase, Reviewed, Comment "
          "FROM Job "
         "WHERE JobStatus IN ('T','W','f','A','E')"
           "AND NOT EXISTS "
                "(SELECT JobHisto.JobId "
                   "FROM JobHisto WHERE JobHisto.Jobid=Job.JobId)"
           "AND JobTDate < %s ";

/* For ua_update.c: fetch the complete Pool record for PoolId %s. */
const char *list_pool =
   "SELECT * FROM Pool WHERE PoolId=%s";

/* For ua_dotcmds.c */
const char *client_backups =
   "SELECT DISTINCT Job.JobId,Client.Name as Client,Level,StartTime,"
   "JobFiles,JobBytes,VolumeName,MediaType,FileSet,Media.Enabled as Enabled"
   " FROM Client,Job,JobMedia,Media,FileSet"
   " WHERE Client.Name='%s'"
   " AND FileSet='%s'"
   " AND Client.ClientId=Job.ClientId "
   " AND JobStatus IN ('T','W') AND Type='B' "
   " AND JobMedia.JobId=Job.JobId AND JobMedia.MediaId=Media.MediaId "
   " AND Job.FileSetId=FileSet.FileSetId"
   " ORDER BY Job.StartTime";

/* ====== ua_prune.c */

/* Distinct JobIds recorded on MediaId %s whose JobTDate is below %s. */
const char *sel_JobMedia =
   "SELECT DISTINCT JobMedia.JobId"
   " FROM JobMedia,Job"
   " WHERE MediaId=%s AND Job.JobId=JobMedia.JobId "
   " AND Job.JobTDate<%s";

/* Prune scratch tables to drop, as a NULL-terminated list. */
const char *drop_deltabs[] = {
   "DROP TABLE DelCandidates",
   NULL
};

const char *create_delindex = "CREATE INDEX DelInx1 ON DelCandidates (JobId)";

/* ======= ua_restore.c */

/* File count recorded for Job %s. */
const char *uar_count_files =
   "SELECT JobFiles FROM Job WHERE JobId=%s";

/* List the 20 most recent good ('T'/'W') backup ('B') Jobs,
 * newest first, with client name, level, file and byte counts. */
const char *uar_list_jobs =
   "SELECT JobId,Client.Name as Client,StartTime,Level as "
   "JobLevel,JobFiles,JobBytes "
   "FROM Client,Job WHERE Client.ClientId=Job.ClientId AND JobStatus IN ('T','W') "
   "AND Type='B' ORDER BY StartTime DESC LIMIT 20";

/* Print the jobs selected for restore (JobId list in %s), oldest first.
 * This single string is used for every catalog backend, so it must stay
 * portable SQL: calling a MySQL-only user-defined function such as
 * convert_bytes_to_humanreadable() here fails on PostgreSQL and SQLite
 * (and on MySQL installs without the function/GRANT).  If human-readable
 * sizes are wanted, add per-backend variants in an array like
 * select_recent_version[] instead. */
const char *uar_print_jobs =
   "SELECT DISTINCT JobId,Level,JobFiles,JobBytes,StartTime,VolumeName"
   " FROM Job JOIN JobMedia USING (JobId) JOIN Media USING (MediaId) "
   " WHERE JobId IN (%s) "
   " ORDER BY StartTime ASC";

/*
 * Find all files for a particular JobId and insert them into
 *  the tree during a restore.
 *  %s is a comma-separated list of JobIds.
 */
const char *uar_sel_files =
   "SELECT Path.Path,Filename.Name,FileIndex,JobId,LStat "
   "FROM File,Filename,Path "
   "WHERE File.JobId IN (%s) AND Filename.FilenameId=File.FilenameId "
   "AND Path.PathId=File.PathId";

/* Drop the restore-selection scratch tables. */
const char *uar_del_temp1 = "DROP TABLE temp1";
const char *uar_del_temp  = "DROP TABLE temp";

/* Insert into temp1 the newest good Full ('F') backup of client %s
 * (ClientId passed twice), started before '%s', for FileSet '%s'.
 * The penultimate %s is an optional extra clause; it must end with a
 * space (or be empty) because "ORDER BY" follows with no separator. */
const char *uar_last_full =
   "INSERT INTO temp1 SELECT Job.JobId,JobTdate "
   "FROM Client,Job,JobMedia,Media,FileSet WHERE Client.ClientId=%s "
   "AND Job.ClientId=%s "
   "AND Job.StartTime < '%s' "
   "AND Level='F' AND JobStatus IN ('T','W') AND Type='B' "
   "AND JobMedia.JobId=Job.JobId "
   "AND Media.Enabled=1 "
   "AND JobMedia.MediaId=Media.MediaId "
   "AND Job.FileSetId=FileSet.FileSetId "
   "AND FileSet.FileSet='%s' "
   "%s"
   "ORDER BY Job.JobTDate DESC LIMIT 1";

/* Copy the Full backup chosen in temp1, together with its volume and
 * session information, into temp. */
const char *uar_full =
   "INSERT INTO temp SELECT Job.JobId,Job.JobTDate,"
   "Job.ClientId,Job.Level,Job.JobFiles,Job.JobBytes,"
   "StartTime,VolumeName,JobMedia.StartFile,VolSessionId,VolSessionTime "
   "FROM temp1,Job,JobMedia,Media WHERE temp1.JobId=Job.JobId "
   "AND Level='F' AND JobStatus IN ('T','W') AND Type='B' "
   "AND Media.Enabled=1 "
   "AND JobMedia.JobId=Job.JobId "
   "AND JobMedia.MediaId=Media.MediaId";

/* Insert into temp the newest good Differential ('D') backup with
 * JobTDate > %s, started before '%s', for client %s and FileSet '%s'.
 * The trailing %s is an optional extra clause; it must supply its own
 * trailing space (or be empty) since "ORDER BY" follows directly. */
const char *uar_dif =
   "INSERT INTO temp SELECT Job.JobId,Job.JobTDate,Job.ClientId,"
   "Job.Level,Job.JobFiles,Job.JobBytes,"
   "Job.StartTime,Media.VolumeName,JobMedia.StartFile,"
   "Job.VolSessionId,Job.VolSessionTime "
   "FROM Job,JobMedia,Media,FileSet "
   "WHERE Job.JobTDate>%s AND Job.StartTime<'%s' "
   "AND Job.ClientId=%s "
   "AND JobMedia.JobId=Job.JobId "
   "AND Media.Enabled=1 "
   "AND JobMedia.MediaId=Media.MediaId "
   "AND Job.Level='D' AND JobStatus IN ('T','W') AND Type='B' "
   "AND Job.FileSetId=FileSet.FileSetId "
   "AND FileSet.FileSet='%s' "
   "%s"
   "ORDER BY Job.JobTDate DESC LIMIT 1";

/* Insert into temp all good Incremental ('I') backups with
 * JobTDate > %s, started before '%s', for client %s and FileSet '%s'.
 * Unlike uar_dif there is no LIMIT: every matching incremental is kept.
 * The final %s is an optional extra clause. */
const char *uar_inc =
   "INSERT INTO temp SELECT Job.JobId,Job.JobTDate,Job.ClientId,"
   "Job.Level,Job.JobFiles,Job.JobBytes,"
   "Job.StartTime,Media.VolumeName,JobMedia.StartFile,"
   "Job.VolSessionId,Job.VolSessionTime "
   "FROM Job,JobMedia,Media,FileSet "
   "WHERE Job.JobTDate>%s AND Job.StartTime<'%s' "
   "AND Job.ClientId=%s "
   "AND Media.Enabled=1 "
   "AND JobMedia.JobId=Job.JobId "
   "AND JobMedia.MediaId=Media.MediaId "
   "AND Job.Level='I' AND JobStatus IN ('T','W') AND Type='B' "
   "AND Job.FileSetId=FileSet.FileSetId "
   "AND FileSet.FileSet='%s' "
   "%s";

/* Show the candidate jobs accumulated in temp, oldest first. */
const char *uar_list_temp =
   "SELECT DISTINCT JobId,Level,JobFiles,JobBytes,StartTime,VolumeName "
   "FROM temp ORDER BY StartTime ASC";


/* JobId/StartTime pairs from temp, oldest first. */
const char *uar_sel_jobid_temp =
   "SELECT DISTINCT JobId,StartTime FROM temp"
   " ORDER BY StartTime ASC";

/* Full dumps of the scratch tables. */
const char *uar_sel_all_temp  = "SELECT * FROM temp";
const char *uar_sel_all_temp1 = "SELECT * FROM temp1";



/* Select FileSet names for this Client, sorted by name.
 * NOTE(review): the ClientId appears twice (Job.ClientId and
 *  Client.ClientId); presumably both placeholders get the same value --
 *  confirm against callers. */
const char *uar_sel_fileset =
   "SELECT DISTINCT FileSet.FileSet FROM Job,"
   "Client,FileSet WHERE Job.FileSetId=FileSet.FileSetId "
   "AND Job.ClientId=%s AND Client.ClientId=%s "
   "ORDER BY FileSet.FileSet";

/* Select all different FileSet for this client
 * This query doesn't guarantee that the Id is the latest
 * version of the FileSet. Can be used with other queries that
 * use Ids to select the FileSet name. (like in accurate)
 * Returns MAX(FileSetId) per distinct FileSet name for client %s.
 */
const char *uar_sel_filesetid =
   "SELECT MAX(FileSet.FileSetId) "
     "FROM FileSet JOIN Job USING (FileSetId) "
         "WHERE Job.ClientId=%s "
        "GROUP BY FileSet";

/*
 *  Find JobId, FileIndex for a given path/file and date
 *  for use when inserting individual files into the tree.
 *  Placeholders in order: StartTime cutoff, Path, Filename, Client name.
 *  The newest good ('T'/'W') backup wins (LIMIT 1).
 */
const char *uar_jobid_fileindex =
   "SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client "
   "WHERE Job.JobId=File.JobId "
   "AND Job.StartTime<='%s' "
   "AND Path.Path='%s' "
   "AND Filename.Name='%s' "
   "AND Client.Name='%s' "
   "AND Job.ClientId=Client.ClientId "
   "AND Path.PathId=File.PathId "
   "AND Filename.FilenameId=File.FilenameId "
   "AND JobStatus IN ('T','W') AND Type='B' "
   "ORDER BY Job.StartTime DESC LIMIT 1";

/* Like uar_jobid_fileindex, but restricted to the JobId list in the
 * first %s.  NOTE(review): unlike uar_jobid_fileindex there is no
 * JobStatus/Type filter here; presumably the caller pre-filters the
 * JobId list -- confirm. */
const char *uar_jobids_fileindex =
   "SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client "
   "WHERE Job.JobId IN (%s) "
   "AND Job.JobId=File.JobId "
   "AND Job.StartTime<='%s' "
   "AND Path.Path='%s' "
   "AND Filename.Name='%s' "
   "AND Client.Name='%s' "
   "AND Job.ClientId=Client.ClientId "
   "AND Path.PathId=File.PathId "
   "AND Filename.FilenameId=File.FilenameId "
   "ORDER BY Job.StartTime DESC LIMIT 1";

/* Query to get list of files from table -- presumably built by an external program */
const char *uar_jobid_fileindex_from_table = 
   "SELECT JobId, FileIndex FROM %s ORDER BY JobId, FileIndex ASC";

/* Get the list of the last recent version per Delta with a given
 *  jobid list. This is a tricky part because with SQL the result of:
 *
 *   SELECT MAX(A), B, C, D FROM... GROUP BY (B,C)
 *
 * doesn't give the good result (for D).
 *
 * With PostgreSQL, we can use DISTINCT ON(), but with Mysql or Sqlite,
 *  we need an extra join using JobTDate.
 */
static const char *select_recent_version_with_basejob_default =
"SELECT FileId, Job.JobId AS JobId, FileIndex, File.PathId AS PathId, "
       "File.FilenameId AS FilenameId, LStat, MD5, DeltaSeq, "
       "Job.JobTDate AS JobTDate "
"FROM Job, File, ( "
    "SELECT MAX(JobTDate) AS JobTDate, PathId, FilenameId "
      "FROM ( "
        "SELECT JobTDate, PathId, FilenameId "   /* Get all normal files */
          "FROM File JOIN Job USING (JobId) "    /* from selected backup */
         "WHERE File.JobId IN (%s) "
          "UNION ALL "
        "SELECT JobTDate, PathId, FilenameId "   /* Get all files from */
          "FROM BaseFiles "                      /* BaseJob */
               "JOIN File USING (FileId) "
               "JOIN Job  ON    (BaseJobId = Job.JobId) "
         "WHERE BaseFiles.JobId IN (%s) "        /* Use Max(JobTDate) to find */
       ") AS tmp "
       "GROUP BY PathId, FilenameId "            /* the latest file version */
    ") AS T1 "
"WHERE (Job.JobId IN ( "  /* Security, we force JobId to be valid */
        "SELECT DISTINCT BaseJobId FROM BaseFiles WHERE JobId IN (%s)) "
        "OR Job.JobId IN (%s)) "
  "AND T1.JobTDate = Job.JobTDate " /* Join on JobTDate to get the original */
  "AND Job.JobId = File.JobId "     /* Job/File record */
  "AND T1.PathId = File.PathId "
  "AND T1.FilenameId = File.FilenameId";

/* Per-backend variants of select_recent_version_with_basejob_default;
 * array order: MySQL, PostgreSQL, SQLite. */
const char *select_recent_version_with_basejob[] =
{
   /* MySQL  */
   select_recent_version_with_basejob_default,

   /* Postgresql */    /* The DISTINCT ON () permits to avoid extra join */
   "SELECT DISTINCT ON (FilenameId, PathId) JobTDate, JobId, FileId, "
         "FileIndex, PathId, FilenameId, LStat, MD5, DeltaSeq "
   "FROM "
     "(SELECT FileId, JobId, PathId, FilenameId, FileIndex, LStat, MD5, DeltaSeq "
         "FROM File WHERE JobId IN (%s) "
        "UNION ALL "
       "SELECT File.FileId, File.JobId, PathId, FilenameId, "
              "File.FileIndex, LStat, MD5, DeltaSeq "
         "FROM BaseFiles JOIN File USING (FileId) "
        "WHERE BaseFiles.JobId IN (%s) "
       ") AS T JOIN Job USING (JobId) "
   "ORDER BY FilenameId, PathId, JobTDate DESC ",

   /* SQLite */
   select_recent_version_with_basejob_default
};
 
/* We do the same thing as the previous query, but we include
 * all delta parts. If the file has been deleted, we can have irrelevant
 * parts.
 *
 * The code that uses results should control the delta sequence with
 * the following rules:
 * First Delta = 0
 * Delta = Previous Delta + 1
 *
 * If we detect a gap, we can discard further pieces
 * If a file starts at 1 instead of 0, the file has been deleted, and further
 *   pieces are useless.
 * This control should be reset for each new file
 */
static const char *select_recent_version_with_basejob_and_delta_default =
"SELECT FileId, Job.JobId AS JobId, FileIndex, File.PathId AS PathId, "
       "File.FilenameId AS FilenameId, LStat, MD5, File.DeltaSeq AS DeltaSeq, "
       "Job.JobTDate AS JobTDate "
"FROM Job, File, ( "
    "SELECT MAX(JobTDate) AS JobTDate, PathId, FilenameId, DeltaSeq "
      "FROM ( "
       "SELECT JobTDate, PathId, FilenameId, DeltaSeq " /*Get all normal files*/
         "FROM File JOIN Job USING (JobId) "          /* from selected backup */
        "WHERE File.JobId IN (%s) "
         "UNION ALL "
       "SELECT JobTDate, PathId, FilenameId, DeltaSeq " /*Get all files from */
         "FROM BaseFiles "                            /* BaseJob */
              "JOIN File USING (FileId) "
              "JOIN Job  ON    (BaseJobId = Job.JobId) "
        "WHERE BaseFiles.JobId IN (%s) "        /* Use Max(JobTDate) to find */
       ") AS tmp "
       "GROUP BY PathId, FilenameId, DeltaSeq "    /* the latest file version */
    ") AS T1 "
"WHERE (Job.JobId IN ( "  /* Security, we force JobId to be valid */
        "SELECT DISTINCT BaseJobId FROM BaseFiles WHERE JobId IN (%s)) "
        "OR Job.JobId IN (%s)) "
  "AND T1.JobTDate = Job.JobTDate " /* Join on JobTDate to get the original */
  "AND Job.JobId = File.JobId "     /* Job/File record */
  "AND T1.PathId = File.PathId "
  "AND T1.FilenameId = File.FilenameId";

/* Per-backend variants of select_recent_version_with_basejob_and_delta_default;
 * array order: MySQL, PostgreSQL, SQLite. */
const char *select_recent_version_with_basejob_and_delta[] = {
   /* MySQL */
   select_recent_version_with_basejob_and_delta_default,

   /* Postgresql */    /* The DISTINCT ON () permits to avoid extra join */
   "SELECT DISTINCT ON (FilenameId, PathId, DeltaSeq) JobTDate, JobId, FileId, "
         "FileIndex, PathId, FilenameId, LStat, MD5, DeltaSeq "
   "FROM "
    "(SELECT FileId, JobId, PathId, FilenameId, FileIndex, LStat, MD5,DeltaSeq "
         "FROM File WHERE JobId IN (%s) "
        "UNION ALL "
       "SELECT File.FileId, File.JobId, PathId, FilenameId, "
              "File.FileIndex, LStat, MD5, DeltaSeq "
         "FROM BaseFiles JOIN File USING (FileId) "
        "WHERE BaseFiles.JobId IN (%s) "
       ") AS T JOIN Job USING (JobId) "
   "ORDER BY FilenameId, PathId, DeltaSeq, JobTDate DESC ",

   /* SQLite */
   select_recent_version_with_basejob_and_delta_default
};

/* Get the list of the last recent version with a given BaseJob jobid list
 * We don't handle Delta with BaseJobs, they have only Full files
 * NOTE(review): both %s placeholders presumably receive the same
 *  comma-separated JobId list -- confirm against callers.
 */
static const char *select_recent_version_default = 
  "SELECT j1.JobId AS JobId, f1.FileId AS FileId, f1.FileIndex AS FileIndex, "
          "f1.PathId AS PathId, f1.FilenameId AS FilenameId, "
          "f1.LStat AS LStat, f1.MD5 AS MD5, j1.JobTDate "
     "FROM ( "     /* Choose the last version for each Path/Filename */
       "SELECT max(JobTDate) AS JobTDate, PathId, FilenameId "
         "FROM File JOIN Job USING (JobId) "
        "WHERE File.JobId IN (%s) "
       "GROUP BY PathId, FilenameId "
     ") AS t1, Job AS j1, File AS f1 "
    "WHERE t1.JobTDate = j1.JobTDate "
      "AND j1.JobId IN (%s) "
      "AND t1.FilenameId = f1.FilenameId "
      "AND t1.PathId = f1.PathId "
      "AND j1.JobId = f1.JobId";

const char *select_recent_version[] =
{
   /* MySQL */
   select_recent_version_default,

   /* Postgresql */
   "SELECT DISTINCT ON (FilenameId, PathId) JobTDate, JobId, FileId, "
          "FileIndex, PathId, FilenameId, LStat, MD5 "
     "FROM File JOIN Job USING (JobId) "
    "WHERE JobId IN (%s) "
    "ORDER BY FilenameId, PathId, JobTDate DESC ",

   /* SQLite */
   select_recent_version_default
};

/* Select the most recent usable Full backup for an accurate job and store
 * it in table btemp3<suffix>.
 * Placeholders, in order: table-name suffix, ClientId, StartTime upper
 * bound, FileSetId.
 * We don't create this table as TEMPORARY because MySQL 
 *  MyISAM 5.0 and 5.1 are unable to run further queries in this mode
 */
static const char *create_temp_accurate_jobids_default = 
 "CREATE TABLE btemp3%s AS "
    "SELECT JobId, StartTime, EndTime, JobTDate, PurgedFiles "
      "FROM Job JOIN FileSet USING (FileSetId) "
     "WHERE ClientId = %s "
       "AND Level='F' AND JobStatus IN ('T','W') AND Type='B' "
       "AND StartTime<'%s' "
       "AND FileSet.FileSet=(SELECT FileSet FROM FileSet WHERE FileSetId = %s) "
     "ORDER BY Job.JobTDate DESC LIMIT 1";

/* The same statement works for every backend. */
const char *create_temp_accurate_jobids[] = {
   /* Mysql */
   create_temp_accurate_jobids_default,

   /* Postgresql */
   create_temp_accurate_jobids_default,

   /* SQLite3 */
   create_temp_accurate_jobids_default
};

/* Temporary table holding the Path/Name pairs of base-job candidates;
 * %lld receives the JobId used in the table name.  Only the MySQL form
 * declares an explicit (prefix) index on the two columns. */
const char *create_temp_basefile[] = {
   /* Mysql */
   "CREATE TEMPORARY TABLE basefile%lld ("
   "Path BLOB NOT NULL,"
   "Name BLOB NOT NULL,"
   "INDEX (Path(255), Name(255)))",

   /* Postgresql */
   "CREATE TEMPORARY TABLE basefile%lld ("
   "Path TEXT,"
   "Name TEXT)",

   /* SQLite3 */
   "CREATE TEMPORARY TABLE basefile%lld ("
   "Path TEXT,"
   "Name TEXT)"
};

/* Materialize the resolved (Path, Name, ...) file list of a base job.
 * The inner %s receives a full SELECT (one of the select_recent_version*
 * queries); %lld receives the JobId used in the table name.  All three
 * backends share the same statement text. */
const char *create_temp_new_basefile[] = {
   /* Mysql */
   "CREATE TEMPORARY TABLE new_basefile%lld AS "
   "SELECT Path.Path AS Path, Filename.Name AS Name, Temp.FileIndex AS FileIndex,"
   "Temp.JobId AS JobId, Temp.LStat AS LStat, Temp.FileId AS FileId, "
   "Temp.MD5 AS MD5 "
   "FROM ( %s ) AS Temp "
   "JOIN Filename ON (Filename.FilenameId = Temp.FilenameId) "
   "JOIN Path ON (Path.PathId = Temp.PathId) "
   "WHERE Temp.FileIndex > 0",

   /* Postgresql */
   "CREATE TEMPORARY TABLE new_basefile%lld AS "
   "SELECT Path.Path AS Path, Filename.Name AS Name, Temp.FileIndex AS FileIndex,"
   "Temp.JobId AS JobId, Temp.LStat AS LStat, Temp.FileId AS FileId, "
   "Temp.MD5 AS MD5 "
   "FROM ( %s ) AS Temp "
   "JOIN Filename ON (Filename.FilenameId = Temp.FilenameId) "
   "JOIN Path ON (Path.PathId = Temp.PathId) "
   "WHERE Temp.FileIndex > 0",

   /* SQLite3 */
   "CREATE TEMPORARY TABLE new_basefile%lld AS "
   "SELECT Path.Path AS Path, Filename.Name AS Name, Temp.FileIndex AS FileIndex,"
   "Temp.JobId AS JobId, Temp.LStat AS LStat, Temp.FileId AS FileId, "
   "Temp.MD5 AS MD5 "
   "FROM ( %s ) AS Temp "
   "JOIN Filename ON (Filename.FilenameId = Temp.FilenameId) "
   "JOIN Path ON (Path.PathId = Temp.PathId) "
   "WHERE Temp.FileIndex > 0"
};

/* ====== ua_prune.c */

/* Temporary table used by pruning to collect the jobs that are candidates
 * for deletion (one row per job considered). */
const char *create_deltabs[] =
{
   /* MySQL */
   "CREATE TEMPORARY TABLE DelCandidates ("
   "JobId INTEGER UNSIGNED NOT NULL, "
   "PurgedFiles TINYINT, "
   "FileSetId INTEGER UNSIGNED, "
   "JobFiles INTEGER UNSIGNED, "
   "JobStatus BINARY(1))",
 
   /* PostgreSQL */
   "CREATE TEMPORARY TABLE DelCandidates ( "
   "JobId INTEGER NOT NULL, "
   "PurgedFiles SMALLINT, "
   "FileSetId INTEGER, "
   "JobFiles INTEGER, "
   "JobStatus char(1))",
 
   /* SQLite */
   "CREATE TEMPORARY TABLE DelCandidates ("
   "JobId INTEGER UNSIGNED NOT NULL, "
   "PurgedFiles TINYINT, "
   "FileSetId INTEGER UNSIGNED, "
   "JobFiles INTEGER UNSIGNED, "
   "JobStatus CHAR)"
}; 

/* ======= ua_purge.c ====== */
/* Select the first available Copy Job that must be upgraded 
 *   to a Backup job when the original backup job is expired.
 * Placeholders, in order: job type character (JT_JOB_COPY), a JobId list,
 * and the same JobId list again.  Results land in temp table cpy_tmp.
 */
static const char *uap_upgrade_copies_oldest_job_default = 
"CREATE TEMPORARY TABLE cpy_tmp AS "
       "SELECT MIN(JobId) AS JobId FROM Job "     /* Choose the oldest job */
        "WHERE Type='%c' "                        /* JT_JOB_COPY */
          "AND ( PriorJobId IN (%s) "             /* JobId selection */
              "OR "
               " PriorJobId IN ( "
                  "SELECT PriorJobId "
                    "FROM Job "
                   "WHERE JobId IN (%s) "         /* JobId selection */
                    " AND Type='B' "
                 ") "
              ") "
          "GROUP BY PriorJobId ";           /* one result per copy */

/* Identical statement for every backend. */
const char *uap_upgrade_copies_oldest_job[] =
{
   /* MySQL */
   uap_upgrade_copies_oldest_job_default,
   /* PostgreSQL */
   uap_upgrade_copies_oldest_job_default,
   /* SQLite */
   uap_upgrade_copies_oldest_job_default
}; 
 
/* ======= ua_restore.c ====== */

/* List Jobs where a particular file is saved.
 * Placeholders: client name, then file name (both pre-escaped).
 * NOTE(review): the MySQL entry wraps JobBytes in
 * convert_bytes_to_humanreadable(), a site-provided stored function (see
 * the accompanying patch notes); the PostgreSQL/SQLite entries return the
 * raw byte count.  Confirm the function is installed in the catalog
 * before using the MySQL form. */
const char *uar_file[] =
{
   /* MySQL */
   "SELECT Job.JobId as JobId,"
   "CONCAT(Path.Path,Filename.Name) as Name, "
   "StartTime,Type as JobType,JobStatus,JobFiles,convert_bytes_to_humanreadable(JobBytes) AS JobBytes "
   "FROM Client,Job,File,Filename,Path WHERE Client.Name='%s' "
   "AND Client.ClientId=Job.ClientId "
   "AND Job.JobId=File.JobId AND File.FileIndex > 0 "
   "AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId "
   "AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20",

   /* Postgresql */
   "SELECT Job.JobId as JobId,"
   "Path.Path||Filename.Name as Name, "
   "StartTime,Type as JobType,JobStatus,JobFiles,JobBytes "
   "FROM Client,Job,File,Filename,Path WHERE Client.Name='%s' "
   "AND Client.ClientId=Job.ClientId "
   "AND Job.JobId=File.JobId AND File.FileIndex > 0 "
   "AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId "
   "AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20",

   /* SQLite3 */
   "SELECT Job.JobId as JobId,"
   "Path.Path||Filename.Name as Name, "
   "StartTime,Type as JobType,JobStatus,JobFiles,JobBytes "
   "FROM Client,Job,File,Filename,Path WHERE Client.Name='%s' "
   "AND Client.ClientId=Job.ClientId "
   "AND Job.JobId=File.JobId AND File.FileIndex > 0 "
   "AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId "
   "AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20"
};

/* Temporary work table used by the restore command to accumulate the
 * jobs/volumes selected for a restore session. */
const char *uar_create_temp[] = {
   /* Mysql */
   "CREATE TEMPORARY TABLE temp ("
   "JobId INTEGER UNSIGNED NOT NULL,"
   "JobTDate BIGINT UNSIGNED,"
   "ClientId INTEGER UNSIGNED,"
   "Level CHAR,"
   "JobFiles INTEGER UNSIGNED,"
   "JobBytes BIGINT UNSIGNED,"
   "StartTime TEXT,"
   "VolumeName TEXT,"
   "StartFile INTEGER UNSIGNED,"
   "VolSessionId INTEGER UNSIGNED,"
   "VolSessionTime INTEGER UNSIGNED)",
 
   /* PostgreSQL */
   "CREATE TEMPORARY TABLE temp ("
   "JobId INTEGER NOT NULL,"
   "JobTDate BIGINT,"
   "ClientId INTEGER,"
   "Level CHAR,"
   "JobFiles INTEGER,"
   "JobBytes BIGINT,"
   "StartTime TEXT,"
   "VolumeName TEXT,"
   "StartFile INTEGER,"
   "VolSessionId INTEGER,"
   "VolSessionTime INTEGER)",
 
   /* SQLite */
   "CREATE TEMPORARY TABLE temp ("
   "JobId INTEGER UNSIGNED NOT NULL,"
   "JobTDate BIGINT UNSIGNED,"
   "ClientId INTEGER UNSIGNED,"
   "Level CHAR,"
   "JobFiles INTEGER UNSIGNED,"
   "JobBytes BIGINT UNSIGNED,"
   "StartTime TEXT,"
   "VolumeName TEXT,"
   "StartFile INTEGER UNSIGNED,"
   "VolSessionId INTEGER UNSIGNED,"
   "VolSessionTime INTEGER UNSIGNED)"
}; 

/* Small companion work table for restore: JobId plus its JobTDate. */
const char *uar_create_temp1[] =
{
   /* MySQL */
   "CREATE TEMPORARY TABLE temp1 ("
   "JobId INTEGER UNSIGNED NOT NULL,"
   "JobTDate BIGINT UNSIGNED)",
   /* PostgreSQL */
   "CREATE TEMPORARY TABLE temp1 ("
   "JobId INTEGER NOT NULL,"
   "JobTDate BIGINT)",
   /* SQLite */
   "CREATE TEMPORARY TABLE temp1 ("
   "JobId INTEGER UNSIGNED NOT NULL,"
   "JobTDate BIGINT UNSIGNED)"
}; 

/* Query to get all files in a directory no recursing.
 *  Placeholders: JobId list, directory path, client name.
 *  Note, for PostgreSQL since it respects the "Single Value
 *  rule", the results of the SELECT will be unoptimized.
 *  I.e. the same file will be restored multiple times, once
 *  for each time it was backed up.
 *  (MySQL/SQLite deduplicate via GROUP BY File.FileIndex.)
 */

const char *uar_jobid_fileindex_from_dir[] = {
   /* Mysql */
   "SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client "
   "WHERE Job.JobId IN (%s) "
   "AND Job.JobId=File.JobId "
   "AND Path.Path='%s' "
   "AND Client.Name='%s' "
   "AND Job.ClientId=Client.ClientId "
   "AND Path.PathId=File.Pathid "
   "AND Filename.FilenameId=File.FilenameId "
   "GROUP BY File.FileIndex ",

   /* Postgresql */
   "SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client "
   "WHERE Job.JobId IN (%s) "
   "AND Job.JobId=File.JobId "
   "AND Path.Path='%s' "
   "AND Client.Name='%s' "
   "AND Job.ClientId=Client.ClientId "
   "AND Path.PathId=File.Pathid "
   "AND Filename.FilenameId=File.FilenameId",

   /* SQLite3 */
   "SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client "
   "WHERE Job.JobId IN (%s) "
   "AND Job.JobId=File.JobId "
   "AND Path.Path='%s' "
   "AND Client.Name='%s' "
   "AND Job.ClientId=Client.ClientId "
   "AND Path.PathId=File.Pathid "
   "AND Filename.FilenameId=File.FilenameId "
   "GROUP BY File.FileIndex "
};

/* ORDER BY clause appended to media selection: most recently written
 * volumes first, never-written volumes (NULL LastWritten) last, ties
 * broken by MediaId. */
const char *sql_media_order_most_recently_written[] = {
   /* Mysql */
   "ORDER BY LastWritten IS NULL,LastWritten DESC,MediaId",

   /* Postgresql */
   "ORDER BY LastWritten IS NULL,LastWritten DESC,MediaId",

   /* SQLite3 */
   "ORDER BY LastWritten IS NULL,LastWritten DESC,MediaId"
};

/* Query the server's connection limit.  SQLite has no such setting, so a
 * constant 0 is selected instead. */
const char *sql_get_max_connections[] = {
   /* Mysql */
   "SHOW VARIABLES LIKE 'max_connections'",
   /* PostgreSQL */
   "SHOW max_connections",
   /* SQLite */
   "SELECT  0"
};

/* Materialize the latest version of each file from a bvfs working table
 * (btemp<suffix>) into a result table.  All three %s placeholders receive
 * the same table-name suffix.
 *  The Group By can return strange numbers when having multiple
 *  version of a file in the same dataset.
 */
const char *default_sql_bvfs_select =
"CREATE TABLE %s AS "
"SELECT File.JobId, File.FileIndex, File.FileId "
"FROM Job, File, ( "
    "SELECT MAX(JobTDate) AS JobTDate, PathId, FilenameId "
       "FROM btemp%s GROUP BY PathId, FilenameId "
    ") AS T1 JOIN Filename USING (FilenameId) "
"WHERE T1.JobTDate = Job.JobTDate "
  "AND Job.JobId = File.JobId "
  "AND T1.PathId = File.PathId "
  "AND T1.FilenameId = File.FilenameId "
  "AND File.FileIndex > 0 "
  "AND Job.JobId IN (SELECT DISTINCT JobId FROM btemp%s) ";

/* Backend dispatch; PostgreSQL uses DISTINCT ON instead of GROUP BY. */
const char *sql_bvfs_select[] =
{
   /* MySQL */
   default_sql_bvfs_select,
   /* PostgreSQL */
   "CREATE TABLE %s AS ( "
        "SELECT JobId, FileIndex, FileId "
          "FROM ( "
             "SELECT DISTINCT ON (PathId, FilenameId) "
                    "JobId, FileIndex, FileId "
               "FROM btemp%s "
              "ORDER BY PathId, FilenameId, JobTDate DESC "
          ") AS T "
          "WHERE FileIndex > 0)",
   /* SQLite */
   default_sql_bvfs_select
};

/* bvfs: list the current version of every plain file in one directory
 * (PathId), covering both directly-backed-up files and files inherited
 * from BaseJobs.  Placeholders for the default form, in order:
 * JobId list, PathId, JobId list, PathId, LIMIT, OFFSET, optional
 * "AND Name LIKE ..." filter, JobId list, JobId list. */
static const char *sql_bvfs_list_files_default =
"SELECT 'F', T1.PathId, T1.FilenameId, Filename.Name, "
        "File.JobId, File.LStat, File.FileId "
"FROM Job, File, ( "
    "SELECT MAX(JobTDate) AS JobTDate, PathId, FilenameId "
      "FROM ( "
        "SELECT JobTDate, PathId, FilenameId "
          "FROM File JOIN Job USING (JobId) "
         "WHERE File.JobId IN (%s) AND PathId = %s "
          "UNION ALL "
        "SELECT JobTDate, PathId, FilenameId "
          "FROM BaseFiles "
               "JOIN File USING (FileId) "
               "JOIN Job  ON    (BaseJobId = Job.JobId) "
         "WHERE BaseFiles.JobId IN (%s)   AND PathId = %s "
       ") AS tmp GROUP BY PathId, FilenameId "
     "LIMIT %lld OFFSET %lld"
    ") AS T1 JOIN Filename USING (FilenameId) "
"WHERE T1.JobTDate = Job.JobTDate "
  "AND Job.JobId = File.JobId "
  "AND T1.PathId = File.PathId "
  "AND T1.FilenameId = File.FilenameId "
  "AND Filename.Name != '' "
  "AND File.FileIndex > 0 "
  " %s "                     /* AND Name LIKE '' */
  "AND (Job.JobId IN ( "
        "SELECT DISTINCT BaseJobId FROM BaseFiles WHERE JobId IN (%s)) "
       "OR Job.JobId IN (%s)) ";

/* NOTE(review): this table has FOUR entries (the SQLite default appears
 * twice) while the other dispatch tables have three -- verify the extra
 * entry against bdb_get_type_index() before removing it. */
const char *sql_bvfs_list_files[] = {
   /* MySQL */
   sql_bvfs_list_files_default,

   /* JobId PathId JobId PathId WHERE? Filename? Limit Offset*/
   /* Postgresql */
 "SELECT Type, PathId, FilenameId, Name, JobId, LStat, FileId "
  "FROM ("
   "SELECT DISTINCT ON (FilenameId) 'F' as Type, PathId, T.FilenameId, "
    "Filename.Name, JobId, LStat, FileId, FileIndex "
     "FROM "
         "(SELECT FileId, JobId, PathId, FilenameId, FileIndex, LStat, MD5 "
            "FROM File WHERE JobId IN (%s) AND PathId = %s "
           "UNION ALL "
          "SELECT File.FileId, File.JobId, PathId, FilenameId, "
                 "File.FileIndex, LStat, MD5 "
            "FROM BaseFiles JOIN File USING (FileId) "
           "WHERE BaseFiles.JobId IN (%s) AND File.PathId = %s "
          ") AS T JOIN Job USING (JobId) JOIN Filename USING (FilenameId) "
          " WHERE Filename.Name != '' "
          " %s "               /* AND Name LIKE '' */
     "ORDER BY FilenameId, StartTime DESC "
   ") AS A WHERE A.FileIndex > 0 "
   "LIMIT %lld OFFSET %lld ",

   /* SQLite */
   sql_bvfs_list_files_default,

   /* SQLite */
   sql_bvfs_list_files_default
};

/* Basically the same thing as select_recent_version_with_basejob_and_delta_default,
 * but we specify a single file with FilenameId/PathId
 * 
 * Input:
 * 1 JobId to look at
 * 2 FilenameId
 * 3 PathId
 * 4 JobId to look at
 * 5 FilenameId
 * 6 PathId
 * 7 Jobid
 * 8 JobId
 */
const char *bvfs_select_delta_version_with_basejob_and_delta_default =
"SELECT FileId, Job.JobId AS JobId, FileIndex, File.PathId AS PathId, "
       "File.FilenameId AS FilenameId, LStat, MD5, File.DeltaSeq AS DeltaSeq, "
       "Job.JobTDate AS JobTDate "
"FROM Job, File, ( "
    "SELECT MAX(JobTDate) AS JobTDate, PathId, FilenameId, DeltaSeq "
      "FROM ( "
       "SELECT JobTDate, PathId, FilenameId, DeltaSeq " /*Get all normal files*/
         "FROM File JOIN Job USING (JobId) "          /* from selected backup */
        "WHERE File.JobId IN (%s) AND FilenameId = %s AND PathId = %s "
         "UNION ALL "
       "SELECT JobTDate, PathId, FilenameId, DeltaSeq " /*Get all files from */ 
         "FROM BaseFiles "                            /* BaseJob */
              "JOIN File USING (FileId) "
              "JOIN Job  ON    (BaseJobId = Job.JobId) "
        "WHERE BaseFiles.JobId IN (%s) "        /* Use Max(JobTDate) to find */
             " AND FilenameId = %s AND PathId = %s "
       ") AS tmp "
       "GROUP BY PathId, FilenameId, DeltaSeq "    /* the latest file version */
    ") AS T1 "
"WHERE (Job.JobId IN ( "  /* Security, we force JobId to be valid */
        "SELECT DISTINCT BaseJobId FROM BaseFiles WHERE JobId IN (%s)) "
        "OR Job.JobId IN (%s)) "
  "AND T1.JobTDate = Job.JobTDate " /* Join on JobTDate to get the original */
  "AND Job.JobId = File.JobId "     /* Job/File record */
  "AND T1.PathId = File.PathId "
  "AND T1.FilenameId = File.FilenameId";


/* Backend dispatch (order: MySQL, PostgreSQL, SQLite). */
const char *bvfs_select_delta_version_with_basejob_and_delta[] =
{
   /* MySQL */
   bvfs_select_delta_version_with_basejob_and_delta_default,

   /* Postgresql */    /* The DISTINCT ON () permits to avoid extra join */
   "SELECT DISTINCT ON (FilenameId, PathId, DeltaSeq) JobTDate, JobId, FileId, "
         "FileIndex, PathId, FilenameId, LStat, MD5, DeltaSeq "
   "FROM "
    "(SELECT FileId, JobId, PathId, FilenameId, FileIndex, LStat, MD5,DeltaSeq "
         "FROM File WHERE JobId IN (%s) AND FilenameId = %s AND PathId = %s "
        "UNION ALL "
       "SELECT File.FileId, File.JobId, PathId, FilenameId, "
              "File.FileIndex, LStat, MD5, DeltaSeq "
         "FROM BaseFiles JOIN File USING (FileId) "
        "WHERE BaseFiles.JobId IN (%s) AND FilenameId = %s AND PathId = %s "
       ") AS T JOIN Job USING (JobId) "
   "ORDER BY FilenameId, PathId, DeltaSeq, JobTDate DESC ",

   /* SQLite */
   bvfs_select_delta_version_with_basejob_and_delta_default
};


/* Lock taken while batch-inserting new Path rows; paired with
 * batch_unlock_tables_query.  PostgreSQL/SQLite open a transaction
 * instead of table locks. */
const char *batch_lock_path_query[] = {
   /* Mysql */
   "LOCK TABLES Path write, batch write, Path as p write",

   /* Postgresql */
   "BEGIN; LOCK TABLE Path IN SHARE ROW EXCLUSIVE MODE",

   /* SQLite3 */
   "BEGIN"
};

/* Lock taken while batch-inserting new Filename rows; paired with
 * batch_unlock_tables_query. */
const char *batch_lock_filename_query[] = {
   /* Mysql */
   "LOCK TABLES Filename write, batch write, Filename as f write",

   /* Postgresql */
   "BEGIN; LOCK TABLE Filename IN SHARE ROW EXCLUSIVE MODE",

   /* SQLite3 */
   "BEGIN"
};

/* Release the lock/transaction opened by the batch_lock_* queries. */
const char *batch_unlock_tables_query[] = {
   /* Mysql */
   "UNLOCK TABLES",

   /* Postgresql */
   "COMMIT",

   /* SQLite3 */
   "COMMIT"
};

/* Insert into Path every distinct path from the batch table that is not
 * already present (dedup via NOT EXISTS, or EXCEPT on SQLite). */
const char *batch_fill_path_query[] = {
   /* Mysql */
   "INSERT INTO Path (Path) "
      "SELECT a.Path FROM "
         "(SELECT DISTINCT Path FROM batch) AS a WHERE NOT EXISTS "
         "(SELECT Path FROM Path AS p WHERE p.Path = a.Path)",
 
   /* PostgreSQL */
   "INSERT INTO Path (Path)"
      "SELECT a.Path FROM "
         "(SELECT DISTINCT Path FROM batch) AS a "
       "WHERE NOT EXISTS (SELECT Path FROM Path WHERE Path = a.Path) ",
 
   /* SQLite */
   "INSERT INTO Path (Path)"
      "SELECT DISTINCT Path FROM batch "
      "EXCEPT SELECT Path FROM Path"
};

/* Insert into Filename every distinct name from the batch table that is
 * not already present; mirrors batch_fill_path_query. */
const char *batch_fill_filename_query[] = {
   /* Mysql */
   "INSERT INTO Filename (Name) "
      "SELECT a.Name FROM "
         "(SELECT DISTINCT Name FROM batch) AS a WHERE NOT EXISTS "
         "(SELECT Name FROM Filename AS f WHERE f.Name = a.Name)",

   /* Postgresql */
   "INSERT INTO Filename (Name) "
      "SELECT a.Name FROM "
         "(SELECT DISTINCT Name FROM batch) as a "
       "WHERE NOT EXISTS "
        "(SELECT Name FROM Filename WHERE Name = a.Name)",

   /* SQLite3 */
   "INSERT INTO Filename (Name) "
      "SELECT DISTINCT Name FROM batch "
      "EXCEPT SELECT Name FROM Filename"
};

/* Regular-expression match operator spelled per backend; note the SQLite
 * fallback is LIKE, i.e. only wildcard (not regex) semantics. */
const char *match_query[] = {
   /* Mysql */
   "REGEXP",
   /* PostgreSQL */
   "~", 
   /* SQLite */
   "LIKE"                       /* MATCH doesn't seems to work anymore... */
}; 
 
/* Create a Counters row.  Placeholders: counter name, min, max, current
 * value, wrap-counter name. */
static const char *insert_counter_values_default =
   "INSERT INTO Counters (Counter, MinValue, "
   "MaxValue, CurrentValue, WrapCounter) "
   "VALUES ('%s','%d','%d','%d','%s')";

const char *insert_counter_values[] = {
   /* MySQL -- MinValue/MaxValue are table-qualified here, presumably to
    * sidestep a keyword clash on some MySQL versions (TODO confirm). */
   "INSERT INTO Counters (Counter, Counters.MinValue, "
   "Counters.MaxValue, CurrentValue, WrapCounter) "
   "VALUES ('%s','%d','%d','%d','%s')",

   /* PostgreSQL */
   insert_counter_values_default,

   /* SQLite */
   insert_counter_values_default
};

/* Read back one Counters row by counter name. */
static const char *select_counter_values_default = 
   "SELECT MinValue, MaxValue, CurrentValue, WrapCounter"
   " FROM Counters WHERE Counter='%s'";
 
const char *select_counter_values[] =
{
   /* MySQL -- table-qualified MinValue/MaxValue, matching
    * insert_counter_values (TODO confirm the qualification is needed). */
   "SELECT Counters.MinValue, Counters.MaxValue, CurrentValue, WrapCounter"
   " FROM Counters WHERE Counter='%s'",
 
   /* PostgreSQL */
   select_counter_values_default, 
 
   /* SQLite */
   select_counter_values_default 
}; 
 
/* Update one Counters row.  Placeholders: min, max, current value,
 * wrap-counter name, counter name. */
static const char *update_counter_values_default = 
   "UPDATE Counters SET MinValue=%d, MaxValue=%d, CurrentValue=%d,"
    "WrapCounter='%s' WHERE Counter='%s'";
 
const char *update_counter_values[] =
{
   /* MySQL -- table-qualified MinValue/MaxValue as in the other counter
    * statements. */
   "UPDATE Counters SET Counters.MinValue=%d, Counters.MaxValue=%d,"
     "CurrentValue=%d, WrapCounter='%s' WHERE Counter='%s'",
   /* PostgreSQL */
   update_counter_values_default, 
   /* SQLite */
   update_counter_values_default 
}; 
 
/* Volumes whose retention period has elapsed (Full/Used only); the final
 * %s receives an optional extra filter clause.
 * NOTE(review): the default (MySQL/SQLite) form adds the integer
 * VolRetention directly to the LastWritten datetime; on MySQL that is
 * numeric -- not interval -- arithmetic, so verify the comparison really
 * behaves as intended.  The PostgreSQL entry uses proper interval math. */
static const char *expired_volumes_defaults =
"SELECT Media.VolumeName  AS volumename,"
       "Media.LastWritten AS lastwritten"
" FROM  Media"
" WHERE VolStatus IN ('Full', 'Used')"
     " AND ( Media.LastWritten +  Media.VolRetention ) < NOW()"
     " %s ";

const char *expired_volumes[] = {
   /* MySQL */
   expired_volumes_defaults,
   /* PostgreSQL */
   "SELECT Media.VolumeName, Media.LastWritten "
   " FROM  Media "
   " WHERE VolStatus IN ('Full', 'Used') "
     " AND ( Media.LastWritten + (interval '1 second' * Media.VolRetention ) < NOW()) "
     " %s ",
   /* SQLite */
   expired_volumes_defaults
};

/* Per-backend SQL expression for the "ExpiresIn" column of media listings,
 * derived from LastWritten + VolRetention.
 *
 * NOTE(review): the MySQL entry renders the absolute expiry *datetime*
 * while the PostgreSQL and SQLite entries return the remaining number of
 * *seconds* -- the column therefore means different things on different
 * backends.  Confirm this asymmetry is intended.
 */
const char *expires_in[] = {
   /* MySQL: 0 when the volume was never written (LastWritten epoch 0),
    * otherwise the expiry moment formatted 'YYYY-mm-dd HH:MM:SS'.
    * The previous expression added and immediately subtracted
    * UNIX_TIMESTAMP(NOW()), which was a no-op that also defeated its
    * GREATEST(0, ...) clamp; this simplified form yields the identical
    * value (LastWritten + VolRetention as a datetime). */
   "(CASE WHEN UNIX_TIMESTAMP(LastWritten) = 0 THEN 0 "
   "ELSE FROM_UNIXTIME(CAST(UNIX_TIMESTAMP(LastWritten) + VolRetention AS SIGNED),"
   " '%Y-%m-%d %H:%i:%s') END)",
   /* PostgreSQL: seconds until expiry, clamped at 0 */
   "GREATEST(0, (extract('epoch' from LastWritten + VolRetention * interval '1second' - NOW())::bigint))",
   /* SQLite: seconds until expiry, clamped at 0 */
   "MAX(0, (strftime('%s', LastWritten) + VolRetention - strftime('%s', datetime('now', 'localtime'))))"
};

Attachment: sql_cmds.c.patch
Description: Binary data

/*
   Bacula(R) - The Network Backup Solution

   Copyright (C) 2000-2016 Kern Sibbald

   The original author of Bacula is Kern Sibbald, with contributions
   from many others, a complete list can be found in the file AUTHORS.

   You may use this file and others of this release according to the
   license defined in the LICENSE file, which includes the Affero General
   Public License, v3.0 ("AGPLv3") and some additional permissions and
   terms pursuant to its AGPLv3 Section 7.

   This notice must be preserved when any source code is 
   conveyed and/or propagated.

   Bacula(R) is a registered trademark of Kern Sibbald.
*/
/*
 * Bacula Catalog Database List records interface routines
 *
 *    Written by Kern Sibbald, March 2000
 *
 */

#include  "bacula.h"

#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL

#include  "cats.h"

/* -----------------------------------------------------------------------
 *
 *   Generic Routines (or almost generic)
 *
 * -----------------------------------------------------------------------
 */

/* Append one condition to a WHERE clause being accumulated in buf:
 * emits " WHERE " before the first condition and " AND " before each
 * subsequent one.  Wrapped in do/while(0) so it is a single statement. */
#define append_filter(buf, sql)  \
   do {                          \
      if (*buf) {                \
         pm_strcat(buf, " AND ");\
      } else {                   \
         pm_strcpy(buf, " WHERE ");\
      }                          \
      pm_strcat(buf, sql);       \
   } while (0)

/*
 * Submit a caller-supplied SQL query and stream the formatted result to
 * sendit().  Returns 1 on success, 0 on query failure; the error text is
 * forwarded to sendit() only when verbose is non-zero.  The database is
 * locked for the whole operation.
 */
int BDB::bdb_list_sql_query(JCR *jcr, const char *query, DB_LIST_HANDLER *sendit,
                      void *ctx, int verbose, e_list_type type)
{
   bdb_lock();
   if (!sql_query(query, QF_STORE_RESULT)) {
      Mmsg(errmsg, _("Query failed: %s\n"), sql_strerror());
      if (verbose) {
         sendit(ctx, errmsg);
      }
      bdb_unlock();
      return 0;
   }

   list_result(jcr,this, sendit, ctx, type);
   sql_free_result();
   bdb_unlock();
   return 1;
}

/*
 * List Pool records: the single pool named in pdbr->Name when set,
 * otherwise every pool ordered by PoolId.  VERT_LIST selects the full
 * column set; the default is a condensed summary line.
 *
 * NOTE(review): convert_bytes_to_humanreadable() is a site-provided MySQL
 * stored function (see the accompanying patch notes); these statements
 * fail on catalogs where it is not installed.
 */
void BDB::bdb_list_pool_records(JCR *jcr, POOL_DBR *pdbr,
                     DB_LIST_HANDLER *sendit, void *ctx, e_list_type type)
{
   char esc[MAX_ESCAPE_NAME_LENGTH];

   bdb_lock();
   bdb_escape_string(jcr, esc, pdbr->Name, strlen(pdbr->Name));

   if (type == VERT_LIST) {
      if (pdbr->Name[0] != 0) {
         Mmsg(cmd, "SELECT PoolId,Name,NumVols,MaxVols,UseOnce,UseCatalog,"
            "AcceptAnyVolume,VolRetention,VolUseDuration,MaxVolJobs,convert_bytes_to_humanreadable(MaxVolBytes) AS MaxVolBytes,"
            "AutoPrune,Recycle,PoolType,LabelFormat,Enabled,ScratchPoolId,"
            "RecyclePoolId,LabelType "
            " FROM Pool WHERE Name='%s'", esc);
      } else {
         /* BUG FIX: this statement previously began with the corrupted
          * token "tttttt" instead of "SELECT", so a verbose listing of
          * all pools produced a SQL syntax error. */
         Mmsg(cmd, "SELECT PoolId,Name,NumVols,MaxVols,UseOnce,UseCatalog,"
            "AcceptAnyVolume,VolRetention,VolUseDuration,MaxVolJobs,convert_bytes_to_humanreadable(MaxVolBytes) AS MaxVolBytes,"
            "AutoPrune,Recycle,PoolType,LabelFormat,Enabled,ScratchPoolId,"
            "RecyclePoolId,LabelType "
            " FROM Pool ORDER BY PoolId");
      }
   } else {
      if (pdbr->Name[0] != 0) {
         Mmsg(cmd, "SELECT PoolId,Name,NumVols,MaxVols,convert_bytes_to_humanreadable(MaxVolBytes) AS MaxVolBytes, VolRetention, Enabled,PoolType,LabelFormat "
           "FROM Pool WHERE Name='%s'", esc);
      } else {
         Mmsg(cmd, "SELECT PoolId,Name,NumVols,MaxVols,convert_bytes_to_humanreadable(MaxVolBytes) AS MaxVolBytes, VolRetention, Enabled,PoolType,LabelFormat "
           "FROM Pool ORDER BY PoolId");
      }
   }

   if (!QueryDB(jcr, cmd)) {     /* query failed: nothing to list */
      bdb_unlock();
      return;
   }

   list_result(jcr, this, sendit, ctx, type);

   sql_free_result();
   bdb_unlock();
}

/*
 * List every Client record ordered by ClientId.  A VERT_LIST request
 * includes the Uname and AutoPrune columns; the normal listing shows a
 * condensed column set.
 */
void BDB::bdb_list_client_records(JCR *jcr, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type)
{
   /* Only the selected column list differs between the two formats. */
   const char *columns = (type == VERT_LIST) ?
      "ClientId,Name,Uname,AutoPrune,FileRetention,JobRetention" :
      "ClientId,Name,FileRetention,JobRetention";

   bdb_lock();
   Mmsg(cmd, "SELECT %s FROM Client ORDER BY ClientId", columns);

   if (QueryDB(jcr, cmd)) {
      list_result(jcr, this, sendit, ctx, type);
      sql_free_result();
   }

   bdb_unlock();
}

/*
 * List restore objects
 *
 * JobId | JobIds: List RestoreObjects for specific Job(s)
 * It is possible to specify the ObjectType using FileType field.
 *
 * Returns silently when neither a JobId nor a valid JobIds list is
 * supplied.  rr->JobIds is validated with is_a_number_list() before being
 * interpolated, which prevents SQL injection through that field.
 */
void BDB::bdb_list_restore_objects(JCR *jcr, ROBJECT_DBR *rr, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type)
{
   POOL_MEM filter;             /* optional "AND ObjectType = n" clause */
   char  ed1[50];
   char *jobid;

   if (rr->JobIds && is_a_number_list(rr->JobIds)) {
      jobid = rr->JobIds;

   } else if (rr->JobId) {
      jobid = edit_int64(rr->JobId, ed1);

   } else {
      return;
   }

   if (rr->FileType > 0) {
      Mmsg(filter, "AND ObjectType = %d ", rr->FileType);
   }

   bdb_lock();
   if (type == VERT_LIST) {
      Mmsg(cmd, "SELECT JobId, RestoreObjectId, ObjectName, "
           "PluginName, ObjectType "
           "FROM RestoreObject JOIN Job USING (JobId) WHERE JobId IN (%s) %s "
           "ORDER BY JobTDate ASC, RestoreObjectId",
           jobid, filter.c_str());
   } else {
      /* non-verbose form additionally reports the object length */
      Mmsg(cmd, "SELECT JobId, RestoreObjectId, ObjectName, "
           "PluginName, ObjectType, ObjectLength "
           "FROM RestoreObject JOIN Job USING (JobId) WHERE JobId IN (%s) %s "
           "ORDER BY JobTDate ASC, RestoreObjectId",
           jobid, filter.c_str());
   }

   if (!QueryDB(jcr, cmd)) {
      bdb_unlock();
      return;
   }

   list_result(jcr, this, sendit, ctx, type);

   sql_free_result();
   bdb_unlock();
}

/*
 * If VolumeName is non-zero, list the record for that Volume
 *   otherwise, list the Volumes in the Pool specified by PoolId
 *
 * The ExpiresIn column is produced by the backend-specific expression in
 * expires_in[], selected via bdb_get_type_index().
 * NOTE(review): VolBytes/MaxVolBytes are wrapped in
 * convert_bytes_to_humanreadable(), a site-provided MySQL stored
 * function; these statements fail on catalogs where it is not installed.
 */
void BDB::bdb_list_media_records(JCR *jcr, MEDIA_DBR *mdbr,
                      DB_LIST_HANDLER *sendit, void *ctx, e_list_type type)
{
   char ed1[50];
   char esc[MAX_ESCAPE_NAME_LENGTH];
   const char *expiresin = expires_in[bdb_get_type_index()];

   bdb_lock();
   bdb_escape_string(jcr, esc, mdbr->VolumeName, strlen(mdbr->VolumeName));

   if (type == VERT_LIST) {
      if (mdbr->VolumeName[0] != 0) {
         Mmsg(cmd, "SELECT MediaId,VolumeName,Slot,PoolId,"
            "MediaType,MediaTypeId,FirstWritten,LastWritten,LabelDate,VolJobs,"
            "VolFiles,VolBlocks,VolMounts,convert_bytes_to_humanreadable(VolBytes) AS VolBytes,VolABytes,VolAPadding,"
            "VolHoleBytes,VolHoles,VolErrors,VolWrites,"
            "VolCapacityBytes,VolStatus,Enabled,Recycle,VolRetention,"
            "VolUseDuration,MaxVolJobs,MaxVolFiles,convert_bytes_to_humanreadable(MaxVolBytes) AS MaxVolBytes,InChanger,"
            "EndFile,EndBlock,VolParts,LabelType,StorageId,DeviceId,"
            "MediaAddressing,VolReadTime,VolWriteTime,"
            "LocationId,RecycleCount,InitialWrite,ScratchPoolId,RecyclePoolId, "
            "ActionOnPurge,%s AS ExpiresIn, Comment"
            " FROM Media WHERE Media.VolumeName='%s'", expiresin, esc);
      } else {
         Mmsg(cmd, "SELECT MediaId,VolumeName,Slot,PoolId,"
            "MediaType,MediaTypeId,FirstWritten,LastWritten,LabelDate,VolJobs,"
            "VolFiles,VolBlocks,VolMounts,convert_bytes_to_humanreadable(VolBytes) AS VolBytes,VolABytes,VolAPadding,"
            "VolHoleBytes,VolHoles,VolErrors,VolWrites,"
            "VolCapacityBytes,VolStatus,Enabled,Recycle,VolRetention,"
            "VolUseDuration,MaxVolJobs,MaxVolFiles,convert_bytes_to_humanreadable(MaxVolBytes) AS MaxVolBytes,InChanger,"
            "EndFile,EndBlock,VolParts,LabelType,StorageId,DeviceId,"
            "MediaAddressing,VolReadTime,VolWriteTime,"
            "LocationId,RecycleCount,InitialWrite,ScratchPoolId,RecyclePoolId, "
            "ActionOnPurge,%s AS ExpiresIn, Comment"
            " FROM Media WHERE Media.PoolId=%s ORDER BY MediaId",
              expiresin, edit_int64(mdbr->PoolId, ed1));
      }
   } else {
      if (mdbr->VolumeName[0] != 0) {
         Mmsg(cmd, "SELECT MediaId,VolumeName,VolStatus,Enabled,"
            "convert_bytes_to_humanreadable(VolBytes) AS VolBytes,VolFiles,VolRetention,Recycle,Slot,InChanger,MediaType,LastWritten,%s AS ExpiresIn "
              "FROM Media WHERE Media.VolumeName='%s'", expiresin, esc);
      } else {
         Mmsg(cmd, "SELECT MediaId,VolumeName,VolStatus,Enabled,"
            "convert_bytes_to_humanreadable(VolBytes) AS VolBytes,VolFiles,VolRetention,Recycle,Slot,InChanger,MediaType,LastWritten,%s AS ExpiresIn "
            "FROM Media WHERE Media.PoolId=%s ORDER BY MediaId",
              expiresin, edit_int64(mdbr->PoolId, ed1));
      }
   }

   if (!QueryDB(jcr, cmd)) {
      bdb_unlock();
      return;
   }

   list_result(jcr, this, sendit, ctx, type);

   sql_free_result();
   bdb_unlock();
}

/*
 * List JobMedia records: those of one Job when JobId > 0, otherwise every
 * JobMedia record in the catalog.  VERT_LIST reports the full block/file
 * index range per record.
 */
void BDB::bdb_list_jobmedia_records(JCR *jcr, uint32_t JobId,
                              DB_LIST_HANDLER *sendit, void *ctx, e_list_type type)
{
   char ed1[50];

   bdb_lock();
   if (type == VERT_LIST) {
      if (JobId > 0) {                   /* do by JobId */
         Mmsg(cmd, "SELECT JobMediaId,JobId,Media.MediaId,Media.VolumeName,"
            "FirstIndex,LastIndex,StartFile,JobMedia.EndFile,StartBlock,"
            "JobMedia.EndBlock "
            "FROM JobMedia,Media WHERE Media.MediaId=JobMedia.MediaId "
            "AND JobMedia.JobId=%s", edit_int64(JobId, ed1));
      } else {
         Mmsg(cmd, "SELECT JobMediaId,JobId,Media.MediaId,Media.VolumeName,"
            "FirstIndex,LastIndex,StartFile,JobMedia.EndFile,StartBlock,"
            "JobMedia.EndBlock "
            "FROM JobMedia,Media WHERE Media.MediaId=JobMedia.MediaId");
      }

   } else {
      if (JobId > 0) {                   /* do by JobId */
         Mmsg(cmd, "SELECT JobId,Media.VolumeName,FirstIndex,LastIndex "
            "FROM JobMedia,Media WHERE Media.MediaId=JobMedia.MediaId "
            "AND JobMedia.JobId=%s", edit_int64(JobId, ed1));
      } else {
         Mmsg(cmd, "SELECT JobId,Media.VolumeName,FirstIndex,LastIndex "
            "FROM JobMedia,Media WHERE Media.MediaId=JobMedia.MediaId");
      }
   }
   if (!QueryDB(jcr, cmd)) {
      bdb_unlock();
      return;
   }

   list_result(jcr, this, sendit, ctx, type);

   sql_free_result();
   bdb_unlock();
}


/*
 * List copy jobs (Type = JT_JOB_COPY), optionally restricted to the given
 * JobIds (matching either the copy or its original) and capped at `limit`
 * rows.  A heading line is sent before the results; nothing is printed
 * when no copies match.
 */
void BDB::bdb_list_copies_records(JCR *jcr, uint32_t limit, char *JobIds,
                            DB_LIST_HANDLER *sendit, void *ctx, e_list_type type)
{
   POOL_MEM str_limit(PM_MESSAGE);     /* optional " LIMIT n" suffix */
   POOL_MEM str_jobids(PM_MESSAGE);    /* optional JobId restriction */

   if (limit > 0) {
      Mmsg(str_limit, " LIMIT %d", limit);
   }

   if (JobIds && JobIds[0]) {
      Mmsg(str_jobids, " AND (Job.PriorJobId IN (%s) OR Job.JobId IN (%s)) ",
           JobIds, JobIds);
   }

   bdb_lock();
   Mmsg(cmd,
   "SELECT DISTINCT Job.PriorJobId AS JobId, Job.Job, "
                   "Job.JobId AS CopyJobId, Media.MediaType "
     "FROM Job "
     "JOIN JobMedia USING (JobId) "
     "JOIN Media    USING (MediaId) "
    "WHERE Job.Type = '%c' %s ORDER BY Job.PriorJobId DESC %s",
        (char) JT_JOB_COPY, str_jobids.c_str(), str_limit.c_str());

   if (!QueryDB(jcr, cmd)) {
      goto bail_out;
   }

   if (sql_num_rows()) {
      if (JobIds && JobIds[0]) {
         sendit(ctx, _("These JobIds have copies as follows:\n"));
      } else {
         sendit(ctx, _("The catalog contains copies as follows:\n"));
      }

      list_result(jcr, this, sendit, ctx, type);
   }

   sql_free_result();

bail_out:
   bdb_unlock();
}

/*
 * List the Log table entries for a single Job, oldest first.
 * A vertical (long) listing also includes the Time column.
 * Nothing is emitted when JobId is zero.
 */
void BDB::bdb_list_joblog_records(JCR *jcr, uint32_t JobId,
                              DB_LIST_HANDLER *sendit, void *ctx, e_list_type type)
{
   char ed1[50];

   if (JobId <= 0) {
      return;
   }

   /* Same query either way; only the selected columns differ */
   const char *cols = (type == VERT_LIST) ? "Time,LogText" : "LogText";

   bdb_lock();
   Mmsg(cmd, "SELECT %s FROM Log "
        "WHERE Log.JobId=%s ORDER BY LogId ASC", cols, edit_int64(JobId, ed1));

   if (QueryDB(jcr, cmd)) {
      list_result(jcr, this, sendit, ctx, type);
      sql_free_result();
   }
   bdb_unlock();
}


/*
 * List Job record(s) that match JOB_DBR
 *
 *  Currently, we return all jobs or if jr->JobId is set,
 *  only the job with the specified id.
 */
alist *BDB::bdb_list_job_records(JCR *jcr, JOB_DBR *jr, DB_LIST_HANDLER *sendit,
                    void *ctx, e_list_type type)
{
   char ed1[50];
   char limit[50];
   char esc[MAX_ESCAPE_NAME_LENGTH];
   alist *list = NULL;                 /* JobId strings, returned only for INCOMPLETE_JOBS */
   POOLMEM *where  = get_pool_memory(PM_MESSAGE);
   POOLMEM *tmp    = get_pool_memory(PM_MESSAGE);
   const char *order = "ASC";
   *where = 0;

   bdb_lock();
   if (jr->order == 1) {
      order = "DESC";
   }
   if (jr->limit > 0) {
      snprintf(limit, sizeof(limit), " LIMIT %d", jr->limit);
   } else {
      limit[0] = 0;
   }
   /* Job selectors are mutually exclusive: Name wins over JobId, JobId over Job */
   if (jr->Name[0]) {
      bdb_escape_string(jcr, esc, jr->Name, strlen(jr->Name));
      Mmsg(tmp, " Name='%s' ", esc);
      append_filter(where, tmp);

   } else if (jr->JobId != 0) {
      Mmsg(tmp, " JobId=%s ", edit_int64(jr->JobId, ed1));
      append_filter(where, tmp);

   } else if (jr->Job[0] != 0) {
      bdb_escape_string(jcr, esc, jr->Job, strlen(jr->Job));
      Mmsg(tmp, " Job='%s' ", esc);
      append_filter(where, tmp);
   }

   /* For incomplete jobs, a FatalError request also matches non-fatal errors */
   if (type == INCOMPLETE_JOBS && jr->JobStatus == JS_FatalError) {
      Mmsg(tmp, " JobStatus IN ('E', 'f') ");
      append_filter(where, tmp);

   } else if (jr->JobStatus) {
      Mmsg(tmp, " JobStatus='%c' ", jr->JobStatus);
      append_filter(where, tmp);
   }

   if (jr->JobType) {
      Mmsg(tmp, " Type='%c' ", jr->JobType);
      append_filter(where, tmp);
   }

   if (jr->JobErrors > 0) {
      Mmsg(tmp, " JobErrors > 0 ");
      append_filter(where, tmp);
   }

   if (jr->ClientId > 0) {
      Mmsg(tmp, " ClientId=%s ", edit_int64(jr->ClientId, ed1));
      append_filter(where, tmp);
   }

   /*
    * NOTE(review): convert_bytes_to_humanreadable() is a stored function
    * that only exists in the MySQL catalog (see the accompanying
    * function_convert_bytes_to_humanreadable.sql).  On PostgreSQL/SQLite
    * catalogs these queries will fail -- confirm whether a
    * bdb_get_type_index() guard is needed here.
    */
   switch (type) {
   case VERT_LIST:
      Mmsg(cmd,
           "SELECT JobId,Job,Job.Name,PurgedFiles,Type,Level,"
           "Job.ClientId,Client.Name as ClientName,JobStatus,SchedTime,"
           "StartTime,EndTime,RealEndTime,JobTDate,"
           "VolSessionId,VolSessionTime,JobFiles,convert_bytes_to_humanreadable(JobBytes) AS JobBytes,ReadBytes,JobErrors,"
           "JobMissingFiles,Job.PoolId,Pool.Name as PoolName,PriorJobId,"
           "Job.FileSetId,FileSet.FileSet,Job.HasBase,Job.HasCache,Job.Comment "
           "FROM Job JOIN Client USING (ClientId) LEFT JOIN Pool USING (PoolId) "
           "LEFT JOIN FileSet USING (FileSetId) %s "
           "ORDER BY StartTime %s %s", where, order, limit);
      break;
   case HORZ_LIST:
      Mmsg(cmd,
           "SELECT JobId,Name,StartTime,Type,Level,JobFiles,convert_bytes_to_humanreadable(JobBytes) AS JobBytes,JobStatus "
           "FROM Job %s ORDER BY StartTime %s,JobId %s %s", where, order, order, limit);
      break;
   case INCOMPLETE_JOBS:
      Mmsg(cmd,
           "SELECT JobId,Name,StartTime,Type,Level,JobFiles,convert_bytes_to_humanreadable(JobBytes) AS JobBytes,JobStatus "
             "FROM Job %s ORDER BY StartTime %s,JobId %s %s",
           where, order, order, limit);
      break;
   default:
      break;
   }
   Dmsg1(100, "SQL: %s\n", cmd);

   free_pool_memory(tmp);
   free_pool_memory(where);

   if (!QueryDB(jcr, cmd)) {
      bdb_unlock();
      return NULL;
   }
   /* For incomplete jobs, also collect the matching JobIds for the caller */
   if (type == INCOMPLETE_JOBS) {
      SQL_ROW row;
      list = New(alist(10));
      sql_data_seek(0);
      for (int i=0; (row=sql_fetch_row()) != NULL; i++) {
         list->append(bstrdup(row[0]));
      }
   }
   sql_data_seek(0);                  /* rewind so list_result sees all rows */
   list_result(jcr, this, sendit, ctx, type);
   sql_free_result();
   bdb_unlock();
   return list;
}

/*
 * List Job totals
 *
 */
void BDB::bdb_list_job_totals(JCR *jcr, JOB_DBR *jr, DB_LIST_HANDLER *sendit, void *ctx)
{
   /* Two summary passes: per-job-name totals, then one grand total row */
   const char *per_job =
      "SELECT  count(*) AS Jobs,sum(JobFiles) "
      "AS Files,convert_bytes_to_humanreadable(sum(JobBytes)) AS Bytes,Name AS Job FROM Job GROUP BY Name";
   const char *grand_total =
      "SELECT count(*) AS Jobs,sum(JobFiles) "
      "AS Files,convert_bytes_to_humanreadable(sum(JobBytes)) As Bytes FROM Job";
   const char *stmts[] = { per_job, grand_total };

   bdb_lock();
   for (int i = 0; i < 2; i++) {
      Mmsg(cmd, "%s", stmts[i]);
      if (!QueryDB(jcr, cmd)) {
         bdb_unlock();
         return;
      }
      list_result(jcr, this, sendit, ctx, HORZ_LIST);
      sql_free_result();
   }
   bdb_unlock();
}

/*
 * List every filename stored for a job, including files inherited
 * from its base jobs.  Results stream through lctx/list_result.
 */
void BDB::bdb_list_files_for_job(JCR *jcr, JobId_t jobid, DB_LIST_HANDLER *sendit, void *ctx)
{
   char ed1[50];
   LIST_CTX lctx(jcr, this, sendit, ctx, HORZ_LIST);
   /* MySQL lacks the standard || concatenation operator, so pick the
    * dialect-appropriate expression; the rest of the query is shared. */
   const char *concat = (bdb_get_type_index() == SQL_TYPE_MYSQL) ?
      "CONCAT(Path.Path,Filename.Name)" : "Path.Path||Filename.Name";

   bdb_lock();

   Mmsg(cmd, "SELECT %s AS Filename "
        "FROM (SELECT PathId, FilenameId FROM File WHERE JobId=%s "
               "UNION ALL "
              "SELECT PathId, FilenameId "
                "FROM BaseFiles JOIN File "
                      "ON (BaseFiles.FileId = File.FileId) "
               "WHERE BaseFiles.JobId = %s"
        ") AS F, Filename,Path "
        "WHERE Filename.FilenameId=F.FilenameId "
        "AND Path.PathId=F.PathId",
        concat, edit_int64(jobid, ed1), ed1);

   if (bdb_big_sql_query(cmd, list_result, &lctx)) {
      lctx.send_dashes();
      sql_free_result();
   }
   bdb_unlock();
}

/*
 * List the filenames a job inherited from its base jobs.
 * Results stream through lctx/list_result.
 */
void BDB::bdb_list_base_files_for_job(JCR *jcr, JobId_t jobid, DB_LIST_HANDLER *sendit, void *ctx)
{
   char ed1[50];
   LIST_CTX lctx(jcr, this, sendit, ctx, HORZ_LIST);
   /* MySQL lacks the standard || concatenation operator, so pick the
    * dialect-appropriate expression; the rest of the query is shared. */
   const char *concat = (bdb_get_type_index() == SQL_TYPE_MYSQL) ?
      "CONCAT(Path.Path,Filename.Name)" : "Path.Path||Filename.Name";

   bdb_lock();

   Mmsg(cmd, "SELECT %s AS Filename "
        "FROM BaseFiles, File, Filename, Path "
        "WHERE BaseFiles.JobId=%s AND BaseFiles.BaseJobId = File.JobId "
        "AND BaseFiles.FileId = File.FileId "
        "AND Filename.FilenameId=File.FilenameId "
        "AND Path.PathId=File.PathId",
        concat, edit_int64(jobid, ed1));

   if (bdb_big_sql_query(cmd, list_result, &lctx)) {
      lctx.send_dashes();
      sql_free_result();
   }
   bdb_unlock();
}

/*
 * List Snapshot records matching the (optional) criteria in sdbr.
 * Each populated sdbr field adds one AND-joined predicate; an empty
 * sdbr lists every snapshot.  Output format is selected by type
 * (VERT_LIST/ARG_LIST include FileSet/Volume/Retention columns).
 */
void BDB::bdb_list_snapshot_records(JCR *jcr, SNAPSHOT_DBR *sdbr,
              DB_LIST_HANDLER *sendit, void *ctx, e_list_type type)
{
   POOLMEM *filter = get_pool_memory(PM_MESSAGE);
   POOLMEM *tmp    = get_pool_memory(PM_MESSAGE);
   POOLMEM *esc    = get_pool_memory(PM_MESSAGE);
   char ed1[50];

   bdb_lock();
   *filter = 0;                  /* start with an empty WHERE filter */

   /* Build the filter: append_filter AND-joins each predicate */
   if (sdbr->Name[0]) {
      bdb_escape_string(jcr, esc, sdbr->Name, strlen(sdbr->Name));
      Mmsg(tmp, "Name='%s'", esc);
      append_filter(filter, tmp);
   }
   if (sdbr->SnapshotId > 0) {
      Mmsg(tmp, "Snapshot.SnapshotId=%d", sdbr->SnapshotId);
      append_filter(filter, tmp);
   }
   if (sdbr->ClientId > 0) {
      Mmsg(tmp, "Snapshot.ClientId=%d", sdbr->ClientId);
      append_filter(filter, tmp);
   }
   if (sdbr->JobId > 0) {
      Mmsg(tmp, "Snapshot.JobId=%d", sdbr->JobId);
      append_filter(filter, tmp);
   }
   if (*sdbr->Client) {
      bdb_escape_string(jcr, esc, sdbr->Client, strlen(sdbr->Client));
      Mmsg(tmp, "Client.Name='%s'", esc);
      append_filter(filter, tmp);
   }
   if (sdbr->Device && *(sdbr->Device)) {
      /* Device is the only pointer field here; NULL-check it and grow
       * esc first since its length is not bounded like the others */
      esc = check_pool_memory_size(esc, strlen(sdbr->Device) * 2 + 1);
      bdb_escape_string(jcr, esc, sdbr->Device, strlen(sdbr->Device));
      Mmsg(tmp, "Device='%s'", esc);
      append_filter(filter, tmp);
   }
   if (*sdbr->Type) {
      bdb_escape_string(jcr, esc, sdbr->Type, strlen(sdbr->Type));
      Mmsg(tmp, "Type='%s'", esc);
      append_filter(filter, tmp);
   }
   if (*sdbr->created_before) {
      bdb_escape_string(jcr, esc, sdbr->created_before, strlen(sdbr->created_before));
      Mmsg(tmp, "CreateDate <= '%s'", esc);
      append_filter(filter, tmp);
   }
   if (*sdbr->created_after) {
      bdb_escape_string(jcr, esc, sdbr->created_after, strlen(sdbr->created_after));
      Mmsg(tmp, "CreateDate >= '%s'", esc);
      append_filter(filter, tmp);
   }
   if (sdbr->expired) {
      /* Expired snapshots: creation time older than now minus retention */
      Mmsg(tmp, "CreateTDate < (%s - Retention)", edit_int64(time(NULL), ed1));
      append_filter(filter, tmp);
   }
   if (*sdbr->CreateDate) {
      bdb_escape_string(jcr, esc, sdbr->CreateDate, strlen(sdbr->CreateDate));
      Mmsg(tmp, "CreateDate = '%s'", esc);
      append_filter(filter, tmp);
   }

   /* ORDER BY is appended to the same filter buffer after the predicates */
   if (sdbr->sorted_client) {
      pm_strcat(filter, " ORDER BY Client.Name, SnapshotId DESC");

   } else {
      pm_strcat(filter, " ORDER BY SnapshotId DESC");
   }

   if (type == VERT_LIST || type == ARG_LIST) {
      Mmsg(cmd, "SELECT SnapshotId, Snapshot.Name, CreateDate, Client.Name AS Client, "
           "FileSet.FileSet AS FileSet, JobId, Volume, Device, Type, Retention, Comment "
           "FROM Snapshot JOIN Client USING (ClientId) LEFT JOIN FileSet USING (FileSetId) %s", filter);

   } else if (type == HORZ_LIST) {
      Mmsg(cmd, "SELECT SnapshotId, Snapshot.Name, CreateDate, Client.Name AS Client, "
           "Device, Type "
           "FROM Snapshot JOIN Client USING (ClientId) %s", filter);
   }

   if (!QueryDB(jcr, cmd)) {
      goto bail_out;
   }

   list_result(jcr, this, sendit, ctx, type);

bail_out:
   /* NOTE(review): unlike the other list functions here, sql_free_result()
    * runs even when the query failed — presumably harmless; confirm */
   sql_free_result();
   bdb_unlock();

   free_pool_memory(filter);
   free_pool_memory(esc);
   free_pool_memory(tmp);
}

#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL */

Attachment: sql_list.c.patch
Description: Binary data

Attachment: function_convert_bytes_to_humanreadable.sql
Description: Binary data

------------------------------------------------------------------------------
Site24x7 APM Insight: Get Deep Visibility into Application Performance
APM + Mobile APM + RUM: Monitor 3 App instances at just $35/Month
Monitor end-to-end web transactions and take corrective actions now
Troubleshoot faster and improve end-user experience. Signup Now!
http://pubads.g.doubleclick.net/gampad/clk?id=272487151&iu=/4140
_______________________________________________
Bacula-users mailing list
Bacula-users@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/bacula-users

Reply via email to