#
# bugs:
#
# 1) this same bug shows up with
#    - DBI-1.57, 1.58, 1.59 (latest svn code)
#    - perl-5.8.8, perl-5.8.8-latest, perl-5.9.4 and perl-5.9.5-latest
#    - SQLite-3.3.13, SQLite-3.4.0, SQLite-3.4.0-latest
#
# 2) the problem is fixed by any one of the following
#    - export MALLOC_CHECK_=0 before running script
#    - $bugfix1 = 1
#    - $bugfix2 = 1
#

After running the attached code, perl segfaults with:

   *** glibc detected *** /usr/bin/perl: \
       corrupted double-linked list: 0x0000000000895fa0 ***

Setting MALLOC_CHECK_=0 simply hides the bug.
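
For example, to run with the MALLOC_CHECK_ workaround (the script name is
hypothetical; the "1" argument selects the built-in test data):

   $ export MALLOC_CHECK_=0
   $ perl ./bug.pl 1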

Setting either of the variables $bugfix1 or $bugfix2 simply ensures there
are no duplicate records in the database before the code runs.

Also note the comments about $sth at the end of the code. To get a clean
run, both $sth->finish and $sth = undef must be called.
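
In short, the only teardown that gives a clean run here is:

   $sth->finish;    # else $sth remains active
   $sth = undef;    # else disconnect warns about active statement handles
   $dbh->commit;
   $dbh->disconnect;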

It's been many years since I used DBI, so a review of my code may point
out some glaring error on my part.

Please let me know.

I'm also happy to run DBI traces or assist in any way, as I was planning
on using DBI and SQLite for a project.

Thanks.

------- env -------

perl path   : /usr/bin/perl
perl ver    : 5.8.8
DBI         : 1.59 (latest SVN)
DBD::SQLite : 1.13
SQLite::DBMS: 3.4.0.1 (latest SVN)

------- code -------

#!/usr/bin/env perl

#
# bugs:
#
# 1) this same bug shows up with
#    - DBI-1.57, 1.58, 1.59 (latest svn code)
#    - perl-5.8.8, perl-5.8.8-latest, perl-5.9.4 and perl-5.9.5-latest
#    - SQLite-3.3.13, SQLite-3.4.0, SQLite-3.4.0-latest
#
# 2) the problem is fixed by any one of the following
#    - export MALLOC_CHECK_=0 before running script
#    - $bugfix1 = 1
#    - $bugfix2 = 1
#

my $bugfix1 = 0;
my $bugfix2 = 0;

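# unbuffer STDOUT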
$| = 1;

use strict;
use warnings;

use DBI;

use Carp qw/croak/;

use File::Path;
use File::Basename;

my($dbh,$sth);

my $table  = 'mxaddrs';
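# the composite primary key below makes a repeat (domain,pri) insert fail
# with "are not unique" -- the error path exercised in process_line()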
my $create = "CREATE TABLE $table (domain TEXT, pri INTEGER, addrs TEXT, PRIMARY 
KEY(domain,pri));";
my $insert = "INSERT INTO $table (domain,pri,addrs) VALUES (?, ?, ?);";

sub init_db {

    my $db = shift;

    my $init = 0;
    if (! -e $db) {
       $init = 1;
       mkpath(dirname($db));
    }

    my $dsn  = "dbi:SQLite:$db";
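    # errors are checked by hand below (RaiseError/PrintError are off), and
    # AutoCommit is off, so the script must commit explicitly at the end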
    my $opts = {
        RaiseError => 0, PrintError => 0, PrintWarn => 0,
        AutoCommit => 0, Taint => 1,
    };

    $dbh = DBI->connect($dsn, '', '', $opts)
        || croak("Unable to dbopen($db): ", $DBI::errstr);

    $dbh->do($create)
        || croak("Unable to create($table): ", $DBI::errstr) if $init;

    $sth = $dbh->prepare($insert)
        || croak("Unable to prepare(sth): ", $DBI::errstr);

}

sub process_line {

    my $line = shift;

    $line =~ s/^\s+//;   # strip leading whitespace

    my @f = split /\s+/, $line;

    my $domain   = shift @f;
    my $priority = shift @f;
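    # whatever remains in @f is the address list; "@f" joins it with spaces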

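    # $bugfix2: re-prepare the statement handle for every input line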
    $sth = $dbh->prepare($insert)
        || croak("Unable to prepare(sth): ", $DBI::errstr) if $bugfix2;

    my $rc = $sth->execute($domain,$priority,"@f");
    if ($rc) {
       print STDERR "okay: $domain $priority\n";
    }
    else {
       # seems i/o cheaper to simply ignore duplicate updates, rather than
       # attempting to read for duplicates first, as most will be new records
       if ($DBI::errstr =~ /are not unique/io) {
          print STDERR "skip: expected err : $domain $priority = 
\"$DBI::errstr\"\n";
       }
       else {
          print STDERR "fail: unexpected err : $domain $priority = 
\"$DBI::errstr\"\n";
          croak("Unable to insert($domain:$priority): ", $DBI::errstr);
       }
    }

}


my $test = shift @ARGV;

my @testdata = (
   "coolsurf.com 1 69.94.137.12",
   "coolsurf.com 2 69.94.137.128",
);

my $db = '/common/data/dnscache.sq3';

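# $bugfix1: remove any existing database so no duplicate rows are present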
unlink $db if $bugfix1;

init_db($db);

if ($test) {
   foreach my $arg (@testdata) {
      process_line($arg);
   }
}
else {
   while (my $line = <STDIN>) {
      process_line($line);
   }
}

# must call $sth->finish else $sth remains active
$sth->finish;
croak("sth still active after finish.") if $sth->{Active};

# must undef $sth else "closing dbh with active statement handles" happens at $dbh->disconnect
$sth = undef;

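# AutoCommit is off, so commit the inserts explicitly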
$dbh->commit;
$dbh->disconnect;

system("sqlite3 $db .dump");
