I am wondering why my catalog contains lots of job records that are older than the
JobRetention defined in the pools, and also older than the default JobRetention
assumed for the clients.
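For illustration, the job records I mean can be listed with a query along these
lines (just a sketch via bconsole's sqlquery, assuming a PostgreSQL catalog; the
90-day cutoff matches the JobRetention of tier1-long):

  SELECT JobId, Name, Level, JobStatus, EndTime
    FROM Job
   WHERE EndTime < NOW() - INTERVAL '90 days'
   ORDER BY EndTime;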
Volume recycling, on the other hand, seems to work fine and adheres to the
VolumeRetention set in the pools.
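(Volume status can be checked with nothing more than the following in bconsole,
looking at the VolStatus and LastWritten columns:)

  list volumes pool=tier1-long
  list volumes pool=tier1-short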
To me it is a mystery, probably because I am overlooking some dependency I am not
aware of.
Can someone please help me understand this?
I hope I have provided all the necessary resources below, including the Pruning job.
Cheers,
J/C
Job {
  Name = "Pruning"
  Description = "Prune all expired volumes"
  Type = "Admin"
  Schedule = "EveryNight"
  JobDefs = "default-tier1"
  PruneJobs = yes
  PruneFiles = yes
  PruneVolumes = yes
  Enabled = yes
  Runscript {
    RunsWhen = "Before"
    RunsOnClient = no
    Console = "prune expired volume yes"
  }
  Priority = 15
  AllowDuplicateJobs = no
}
JobDefs {
  Name = "default-tier1"
  Description = "Default backup job for Tier 1"
  Type = "Backup"
  Level = "Full"
  Messages = "Standard"
  Pool = "tier1-long"
  FullBackupPool = "tier1-long"
  IncrementalBackupPool = "tier1-short"
  Client = "filer-fd"
  Fileset = "EmptyFileset"
  WriteBootstrap = "/disaster-recovery/bootstrap/%c_%n.bsr"
  MaxFullInterval = 2678400
  SpoolAttributes = no
  Priority = 20
  AllowMixedPriority = yes
  AllowIncompleteJobs = no
  Accurate = yes
  AllowDuplicateJobs = no
}
Schedule {
  Name = "EveryNight"
  Run = at 22:00
}
Job {
  Name = "catalog-tier1"
  Description = "Backup Bacula MyCatalog to Tier 1 storage"
  Pool = "tier1-long"
  FullBackupPool = "tier1-long"
  Fileset = "Catalog"
  Schedule = "EveryNight-Full"
  JobDefs = "various-tier1"
  Enabled = yes
  Runscript {
    RunsWhen = "Before"
    RunsOnClient = no
    Command = "/opt/bacula/scripts/make_catalog_backup.pl MyCatalog"
  }
  Runscript {
    RunsWhen = "After"
    FailJobOnError = no
    RunsOnClient = no
    Command = "/opt/bacula/scripts/delete_catalog_backup"
  }
  Runscript {
    RunsWhen = "After"
    FailJobOnError = no
    RunsOnClient = no
    Console = "purge volume action=all allpools storage=unraid-tier1-storage"
  }
  Priority = 50
  AllowIncompleteJobs = no
  AllowDuplicateJobs = no
}
JobDefs {
  Name = "various-tier1"
  Type = "Backup"
  Level = "Full"
  Messages = "Standard"
  Pool = "tier1-long"
  FullBackupPool = "tier1-long"
  IncrementalBackupPool = "tier1-short"
  Client = "filer-fd"
  Fileset = "EmptyFileset"
  Schedule = "Third-Sat-Full_Even-Incr"
  WriteBootstrap = "/disaster-recovery/bootstrap/%c_%n.bsr"
  Priority = 20
  AllowMixedPriority = yes
  Accurate = yes
}
Pool {
  Name = "tier1-short"
  PoolType = "Backup"
  LabelFormat = "tier1-short-vol-"
  ActionOnPurge = "Truncate"
  MaximumVolumes = 500
  MaximumVolumeBytes = 20000000000
  VolumeRetention = 3456000   # 40 days
  NextPool = "tier2-short"
  Storage = "tier1-storage"
  ScratchPool = "Scratch"
  Catalog = "MyCatalog"
  FileRetention = 2592000     # 30 days
  JobRetention = 2592000      # 30 days
}
Pool {
  Name = "tier1-long"
  PoolType = "Backup"
  LabelFormat = "tier1-long-vol-"
  ActionOnPurge = "Truncate"
  MaximumVolumes = 500
  MaximumVolumeBytes = 20000000000
  VolumeRetention = 8640000   # 100 days
  NextPool = "tier2-long"
  Storage = "tier1-storage"
  ScratchPool = "Scratch"
  Catalog = "MyCatalog"
  FileRetention = 7776000     # 90 days
  JobRetention = 7776000      # 90 days
}
Client {
  Name = "filer-fd"
  Address = "127.0.0.1"
  FdPort = 9102
  Password = "redacted"
  Catalog = "MyCatalog"
  AutoPrune = yes
  MaximumConcurrentJobs = 5
}
Client defaults are: AutoPrune = yes, JobRetention = 180 days, FileRetention = 60 days.
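Spelled out explicitly, those defaults would correspond to directives like these
(a sketch of the effective values only, not what is literally in my config):

  Client {
    Name = "filer-fd"
    Address = "127.0.0.1"
    Password = "redacted"
    Catalog = "MyCatalog"
    AutoPrune = yes           # also set explicitly above
    JobRetention = 180 days   # directive default
    FileRetention = 60 days   # directive default
  }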