Hello,

I have set up a Bacula installation with a Dell PV132T autochanger (/dev/sg2).
It has two LTO3 drives, /dev/nst0 and /dev/nst1.
Both drives work fine with the mtx-changer script. Backups currently run,
but only drive 0 is ever used. I would like to run jobs on both drive 0 and
drive 1; however, when I start two jobs at the same time, the first one
starts writing while the second one also starts but waits for drive 0 to
become free before writing.
Could you help me get the second job to load a tape in drive 1 and start
writing on drive 1 while drive 0 is in use?
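
For reference, this is the kind of manual test I mean when I say the
changer script works on both drives (the arguments follow the
%c %o %S %a %d order of my Changer Command below; slot 3 is just an
example):

/etc/bacula/scripts/mtx-changer /dev/sg2 load 3 /dev/nst1 1     # load slot 3 into drive 1
/etc/bacula/scripts/mtx-changer /dev/sg2 loaded 3 /dev/nst1 1   # print the slot currently in drive 1
/etc/bacula/scripts/mtx-changer /dev/sg2 unload 3 /dev/nst1 1   # put the tape back in slot 3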

Thanks in advance.

PS: See some parts of my config below!



Here is my storage daemon conf:

Storage {                             # definition of myself
  Name = srv-mpp-lrs-sd
  SDPort = 9103                  # SD port to listen on
  WorkingDirectory = "/var/lib/bacula"
  Pid Directory = "/var/run/bacula"
  Maximum Concurrent Jobs = 2
  SDAddress = 0.0.0.0
}
Director {
  Name = srv-mpp-lrs-dir
  Password = "xxx"
}
Director {
  Name = srv-mpp-lrs-mon
  Password = "xxx"
  Monitor = yes
}
# An autochanger device with two drives
Autochanger {
  Name = Dell-PV132T
  Device = ULTRIUM-TD3-1
  Device = ULTRIUM-TD3-2
  Changer Command = "/etc/bacula/scripts/mtx-changer %c %o %S %a %d"
  Changer Device = /dev/sg2
}
Device {
  Name = ULTRIUM-TD3-1
  Drive Index = 0
  Media Type = LTO3
  Archive Device = /dev/nst0
  AutomaticMount = yes;               # when device opened, read it
  AlwaysOpen = yes;
  RemovableMedia = yes;
  #RandomAccess = no;
  AutoChanger = yes
  # Enable the Alert command only if you have the mtx package loaded
  # Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'"
}
Device {
  Name = ULTRIUM-TD3-2
  Drive Index = 1
  Media Type = LTO3
  Archive Device = /dev/nst1
  AutomaticMount = yes;               # when device opened, read it
  AlwaysOpen = yes;
  RemovableMedia = yes;
  #RandomAccess = no;
  AutoChanger = yes
  # Enable the Alert command only if you have the mtx package loaded
  # Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'"
}
Messages {
  Name = Standard
  director = srv-mpp-lrs-dir = all
}



And some useful parts of the director conf:

Director {                            # define myself
  Name = srv-mpp-lrs-dir
  DIRport = 9101                # where we listen for UA connections
  QueryFile = "/etc/bacula/scripts/query.sql"
  WorkingDirectory = "/var/lib/bacula"
  PidDirectory = "/var/run/bacula"
  Password = "xxx"         # Console password
  Messages = Daemon
  DirAddress = 0.0.0.0
  Maximum Concurrent Jobs = 2
}
# Templates pour les jobs
JobDefs {
  Name = "DefaultJob"
  Type = Backup
  Storage = Autochanger
  Messages = Standard
  Client = srv-mpp-lrs-fd
  FileSet = "Full Set"
  Priority = 5
  # Do not wait more than 6 days (next weekly)
  Max Start Delay = 6d
  Max Wait Time = 6d
  # Do not wait more than 23h (next daily)
  Incremental Max Wait Time = 23h
  Schedule = "Planning"
  Pool = Weekly, Daily
}
# Autochanger Dell PV132T (21x LTO3)
Storage {
  Name = Autochanger
  Address = srv-mpp-lrs
  SDPort = 9103
  Password = "xxx"
  Device = Dell-PV132T               # must be same as Device in Storage daemon
  Media Type = LTO3                  # must be same as MediaType in Storage daemon
  Autochanger = yes                  # enable for autochanger device
  Maximum Concurrent Jobs = 2
}
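
One untested guess: maybe the second job queues on drive 0 because it
prefers the volume that is already mounted there. Would something like
this in the JobDefs let it pick the idle drive instead? (I am not sure,
hence the question.)

JobDefs {
  Name = "DefaultJob"
  ...
  # untested: let a second job use the free drive instead of
  # waiting for the already-mounted volume
  Prefer Mounted Volumes = no
}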


-- 
Adam CECILE                    Linbox / Free&ALter Soft
152 rue de Grigy               tél: +33 3 87 50 87 95
Technopôle Metz 2000           fax: +33 3 87 75 19 26           
57070 METZ - France            http://www.linbox.com

