You can try the script I made for exporting the large objects of all my Pg
databases. The ideas were borrowed from pg_dumplo-0.0.5.
Make sure that you have the "Perl5 extension for PostgreSQL" (Pg.pm)
installed.
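
If you are not sure whether Pg.pm is available, a quick check is

  perl -MPg -e 1

which exits quietly when the module can be loaded and complains with
"Can't locate Pg.pm in @INC" otherwise.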

sherwin

#!/usr/bin/perl -w
#
# Export the large objects of all Pg databases
#  - Sherwin T. Daganato ([EMAIL PROTECTED])
#  - October 8, 2000  
#

use strict;
use Pg;

my $space = shift || die "Usage: $0 outputdir";

# make sure the directory doesn't end in '/'
$space =~ s/\/$//;

my $conn_all = Pg::connectdb("dbname=template1");
die $conn_all->errorMessage unless PGRES_CONNECTION_OK eq $conn_all->status;

# find all databases
my $sql = "SELECT datname FROM pg_database ORDER BY datname";
my $res_all = $conn_all->exec($sql);
die $conn_all->errorMessage unless PGRES_TUPLES_OK eq $res_all->resultStatus;

my $counter = 0;
while (my ($database) = $res_all->fetchrow) {
  my $conn_db = Pg::connectdb("dbname=$database");
  die $conn_db->errorMessage unless PGRES_CONNECTION_OK eq $conn_db->status;

  # find any candidate tables with columns of type oid
  $sql = qq/SELECT c.relname, a.attname 
            FROM pg_class c, pg_attribute a, pg_type t 
            WHERE a.attnum > 0 
            AND a.attrelid = c.oid 
            AND a.atttypid = t.oid 
            AND t.typname = 'oid' 
            AND c.relname NOT LIKE 'pg_%';
           /;

  my $res_db = $conn_db->exec($sql);
  die $conn_db->errorMessage unless PGRES_TUPLES_OK eq $res_db->resultStatus;

  my $path;
  local (*F);
  while (my ($table, $fld) = $res_db->fetchrow) {

    # find large object id
    $sql = sprintf ("SELECT x.%s FROM %s x WHERE EXISTS " .
                    "(SELECT c.oid FROM pg_class c " .
                    "WHERE c.relkind = 'l' AND x.%s = c.oid)",
                    $fld, $table, $fld);

    # alternative: find large object ids with a plain join
    #$sql = sprintf ("SELECT x.%s FROM %s x, pg_class c " .
    #                "WHERE x.%s = c.oid AND c.relkind = 'l'",
    #                $fld, $table, $fld);

    my $res_tbl = $conn_db->exec($sql);
    die $conn_db->errorMessage unless PGRES_TUPLES_OK eq $res_tbl->resultStatus;

    my $tuples;
    next unless (($tuples = $res_tbl->ntuples) > 0);
    $counter += $tuples;

    $path = sprintf ("%s/%s", $space, $database);

    if ( -d $path) {

      # create the file if it doesn't exist and append to it
      open(F, ">>$path/lo_dump.index")
        || die "\n $0: cannot open $path/lo_dump.index: $! \n";
    } else {

      # create dir for database
      mkdir($path, 0755) || die "\n Can't create $path: $! \n";

      # open the file for writing, overwriting any existing file
      open(F, ">$path/lo_dump.index")
        || die "\n $0: cannot open $path/lo_dump.index: $! \n";

      # temporarily turn off warnings; some connection attributes may be undef
      local $^W = 0;

      print F "#\n# This is the PostgreSQL large object dump index\n#\n";
      printf F "#\tDate:     %s\n", scalar(localtime);
      printf F "#\tHost:     %s\n", $conn_db->host;
      printf F "#\tDatabase: %s\n", $conn_db->db;
      printf F "#\tUser:     %s\n", $conn_db->user;
      printf F "#\n# oid\ttable\tattribut\tinfile\n#\n";
    }

    $path = sprintf ("%s/%s", $path, $table);
    
    # create dir for table
    mkdir($path, 0755) || die "\n Can't create $path: $! \n";
           
    $path = sprintf ("%s/%s", $path, $fld); 
  
    # create dir for field
    mkdir($path, 0755) || die "\n Can't create $path: $! \n";

    printf "dump %s.%s (%d large obj)\n", $table, $fld, $tuples;  

    while (my ($lobjOid) = $res_tbl->fetchrow) {

      $path = sprintf ("%s/%s/%s/%s/%s", 
        $space, $database, $table, $fld, $lobjOid);
       
      my $res_lobj = $conn_db->exec("BEGIN");
      die $conn_db->errorMessage unless PGRES_COMMAND_OK eq $res_lobj->resultStatus;

      # export large object
      if ( 1 == $conn_db->lo_export($lobjOid, $path) ) {
        printf F "%s\t%s\t%s\t%s/%s/%s/%s\n", 
          $lobjOid, $table, $fld, $database, $table, $fld, $lobjOid;
      } else {
        printf STDERR "%s: %s\n", $conn_db->errorMessage, $0;
      }

      $res_lobj = $conn_db->exec("END");
      die $conn_db->errorMessage unless PGRES_COMMAND_OK eq $res_lobj->resultStatus;
 
    }
    close(F);
  }
  undef $conn_db;
}

printf "\nExported %d large objects.\n\n", $counter;
undef $conn_all;
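
In case it is useful, here is a rough, untested sketch of the matching
re-import script. It assumes the directory layout and lo_dump.index format
written by the exporter above, and that Pg.pm's lo_import returns the oid of
the newly created large object (something false on failure). Because large
objects get new oids when they are re-imported, each row's oid column is
updated to point at the freshly imported object.

#!/usr/bin/perl -w
#
# Re-import large objects dumped by the export script above (sketch).
#

use strict;
use Pg;

my $space    = shift;
my $database = shift;
die "Usage: $0 dumpdir database" unless defined $space && defined $database;

# make sure the directory doesn't end in '/'
$space =~ s/\/$//;

my $conn = Pg::connectdb("dbname=$database");
die $conn->errorMessage unless PGRES_CONNECTION_OK eq $conn->status;

open(F, "<$space/$database/lo_dump.index")
  || die "\n $0: cannot open $space/$database/lo_dump.index: $! \n";

while (<F>) {
  next if /^\s*#/ || /^\s*$/;   # skip header comments and blank lines
  chomp;
  my ($old_oid, $table, $fld, $infile) = split /\t/;

  my $res = $conn->exec("BEGIN");
  die $conn->errorMessage unless PGRES_COMMAND_OK eq $res->resultStatus;

  # import the file as a new large object
  my $new_oid = $conn->lo_import("$space/$infile");
  die $conn->errorMessage unless $new_oid;

  # point the oid column at the new large object
  $res = $conn->exec(sprintf ("UPDATE %s SET %s = %s WHERE %s = %s",
                              $table, $fld, $new_oid, $fld, $old_oid));
  die $conn->errorMessage unless PGRES_COMMAND_OK eq $res->resultStatus;

  $res = $conn->exec("END");
  die $conn->errorMessage unless PGRES_COMMAND_OK eq $res->resultStatus;

  printf "restored %s.%s: %s -> %s\n", $table, $fld, $old_oid, $new_oid;
}
close(F);
undef $conn;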


[EMAIL PROTECTED] writes:
>
>
>On Mon, 30 Oct 2000, [EMAIL PROTECTED] wrote:
>
>>Large objects are not dumped. This should be
>>noted in the documentation for large objects.
>>
>>You need to write a script which writes them to
>>disk and then imports them back in after you have
>>installed your dbs.
>>
>>
>>Troy
>
>CREATE TABLE news      -- { key: news id, newarchivio, newsnuove }
>(
>       "idnews"        SERIAL primary key,     
>       "oidnotizia"    OID,            -- news as large object 
>       "autore"        TEXT            -- author
>);
>
>How should the script for this table be written?
>
>I found something about large objects only in the programmer's guide; are they
>covered in other docs?
>
>Thanks in advance 
>
>Alex
>