-# Copyright (C) 2018-2019 all contributors <meta@public-inbox.org>
+# Copyright (C) 2018-2020 all contributors <meta@public-inbox.org>
# License: AGPL-3.0+ <https://www.gnu.org/licenses/agpl-3.0.txt>
package PublicInbox::Xapcmd;
use strict;
use warnings;
-use PublicInbox::Spawn qw(which spawn);
+use PublicInbox::Spawn qw(which popen_rd);
use PublicInbox::Over;
-use PublicInbox::Search;
-use File::Temp qw(tempdir);
+use PublicInbox::SearchIdx;
+use File::Temp 0.19 (); # ->newdir
use File::Path qw(remove_tree);
+use File::Basename qw(dirname);
+use POSIX ();
# support testing with dev versions of Xapian which installs
# commands with a version number suffix (e.g. "xapian-compact-1.5")
our $XAPIAN_COMPACT = $ENV{XAPIAN_COMPACT} || 'xapian-compact';
+our @COMPACT_OPT = qw(jobs|j=i quiet|q blocksize|b=s no-full|n fuller|F);
sub commit_changes ($$$$) {
- my ($ibx, $old, $new, $opt) = @_;
-
+ my ($ibx, $im, $tmp, $opt) = @_;
+ my $reshard = $opt->{reshard};
my $reindex = $opt->{reindex};
- my $im = $ibx->importer(0);
- $im->lock_acquire if $reindex;
- my @st = stat($old) or die "failed to stat($old): $!\n";
+ $SIG{INT} or die 'BUG: $SIG{INT} not handled';
+ my @old_shard;
- my $over = "$old/over.sqlite3";
- if (-f $over) {
- $over = PublicInbox::Over->new($over);
- $over->connect->sqlite_backup_to_file("$new/over.sqlite3");
- $over = undef;
- }
- rename($old, "$new/old") or die "rename $old => $new/old: $!\n";
- chmod($st[2] & 07777, $new) or die "chmod $old: $!\n";
- rename($new, $old) or die "rename $new => $old: $!\n";
- remove_tree("$old/old") or die "failed to remove $old/old: $!\n";
+ while (my ($old, $newdir) = each %$tmp) {
+ next if $old eq ''; # no invalid paths
+ my @st = stat($old);
+		if (!@st && !defined($reshard)) {
+ die "failed to stat($old): $!";
+ }
+
+		my $new = defined($newdir) ? $newdir->dirname : undef;
+ my $over = "$old/over.sqlite3";
+ if (-f $over) { # only for v1, v2 over is untouched
+ defined $new or die "BUG: $over exists when culling v2";
+ $over = PublicInbox::Over->new($over);
+ my $tmp_over = "$new/over.sqlite3";
+ $over->connect->sqlite_backup_to_file($tmp_over);
+ $over = undef;
+ }
+
+ if (!defined($new)) { # culled shard
+ push @old_shard, $old;
+ next;
+ }
- if ($reindex) {
+ if (@st) {
+ chmod($st[2] & 07777, $new) or die "chmod $old: $!\n";
+ rename($old, "$new/old") or
+ die "rename $old => $new/old: $!\n";
+ }
+ # Xtmpdir->DESTROY won't remove $new after this:
+ rename($new, $old) or die "rename $new => $old: $!\n";
+ if (@st) {
+ my $prev = "$old/old";
+ remove_tree($prev) or
+ die "failed to remove $prev: $!\n";
+ }
+ }
+ remove_tree(@old_shard);
+ $tmp = undef;
+ if (!$opt->{-coarse_lock}) {
$opt->{-skip_lock} = 1;
- PublicInbox::Admin::index_inbox($ibx, $opt);
- # implicit lock_release
- } else {
- $im->lock_release;
+
+ if ($im->can('count_shards')) {
+ my $pr = $opt->{-progress};
+ my $n = $im->count_shards;
+ if (defined $reshard && $n != $reshard) {
+ die
+"BUG: counted $n shards after resharding to $reshard";
+ }
+ my $prev = $im->{shards};
+ if ($pr && $prev != $n) {
+ $pr->("shard count changed: $prev => $n\n");
+ $im->{shards} = $n;
+ }
+ }
+
+ PublicInbox::Admin::index_inbox($ibx, $im, $opt);
}
}
-sub xspawn {
- my ($cmd, $env, $opt) = @_;
- if (ref($cmd->[0]) eq 'CODE') {
- my $cb = shift(@$cmd); # $cb = cpdb()
- defined(my $pid = fork) or die "fork: $!";
- return $pid if $pid > 0;
- eval { $cb->($cmd, $env, $opt) };
- die $@ if $@;
- exit 0;
- } else {
- spawn($cmd, $env, $opt);
- }
+sub cb_spawn {
+ my ($cb, $args, $opt) = @_; # $cb = cpdb() or compact()
+ defined(my $pid = fork) or die "fork: $!";
+ return $pid if $pid > 0;
+ $cb->($args, $opt);
+ POSIX::_exit(0);
}
sub runnable_or_die ($) {
which($exe) or die "$exe not found in PATH\n";
}
-sub prepare_reindex ($$) {
- my ($ibx, $reindex) = @_;
- if ($ibx->{version} == 1) {
+sub prepare_reindex ($$$) {
+ my ($ibx, $im, $reindex) = @_;
+ if ($ibx->version == 1) {
my $dir = $ibx->search->xdir(1);
- my $xdb = Search::Xapian::Database->new($dir);
+ my $xdb = $PublicInbox::Search::X{Database}->new($dir);
if (my $lc = $xdb->get_metadata('last_commit')) {
$reindex->{from} = $lc;
}
} else { # v2
- my $v2w = $ibx->importer(0);
my $max;
- $v2w->git_dir_latest(\$max) or return;
+ $im->git_dir_latest(\$max) or return;
my $from = $reindex->{from};
my $mm = $ibx->mm;
my $v = PublicInbox::Search::SCHEMA_VERSION();
}
}
-sub progress_prepare ($) {
- my ($opt) = @_;
- if ($opt->{quiet}) {
- open my $null, '>', '/dev/null' or
- die "failed to open /dev/null: $!\n";
- $opt->{1} = fileno($null);
- $opt->{-dev_null} = $null;
- } else {
- $opt->{-progress} = 1;
- }
+sub same_fs_or_die ($$) {
+ my ($x, $y) = @_;
+ return if ((stat($x))[0] == (stat($y))[0]); # 0 - st_dev
+ die "$x and $y reside on different filesystems\n";
}
-sub run {
- my ($ibx, $cmd, $env, $opt) = @_;
- progress_prepare($opt ||= {});
- my $dir = $ibx->{mainrepo} or die "no mainrepo in inbox\n";
- my $exe = $cmd->[0];
- my $pfx = $exe;
- runnable_or_die($XAPIAN_COMPACT) if $opt->{compact};
+sub process_queue {
+ my ($queue, $cb, $opt) = @_;
+ my $max = $opt->{jobs} || scalar(@$queue);
+ if ($max <= 1) {
+ while (defined(my $args = shift @$queue)) {
+ $cb->($args, $opt);
+ }
+ return;
+ }
- my $reindex; # v1:{ from => $x40 }, v2:{ from => [ $x40, $x40, .. ] } }
- my $from; # per-epoch ranges
+ # run in parallel:
+ my %pids;
+ while (@$queue) {
+ while (scalar(keys(%pids)) < $max && scalar(@$queue)) {
+ my $args = shift @$queue;
+ $pids{cb_spawn($cb, $args, $opt)} = $args;
+ }
- if (ref($exe) eq 'CODE') {
- $pfx = 'CODE';
- $reindex = $opt->{reindex} = {};
- $from = $reindex->{from} = [];
- require Search::Xapian::WritableDatabase;
- } else {
- runnable_or_die($exe);
+ while (scalar keys %pids) {
+ my $pid = waitpid(-1, 0);
+ my $args = delete $pids{$pid};
+ die join(' ', @$args)." failed: $?\n" if $?;
+ }
}
- $ibx->umask_prepare;
+}
+
+sub setup_signals () {
+ # http://www.tldp.org/LDP/abs/html/exitcodes.html
+ $SIG{INT} = sub { exit(130) };
+ $SIG{HUP} = $SIG{PIPE} = $SIG{TERM} = sub { exit(1) };
+}
+
+sub prepare_run {
+ my ($ibx, $opt) = @_;
+ my $tmp = {}; # old shard dir => File::Temp->newdir object or undef
+ my @queue; # ([old//src,newdir]) - list of args for cpdb() or compact()
+
my $old = $ibx->search->xdir(1);
-d $old or die "$old does not exist\n";
- my $new = tempdir("$pfx-XXXXXXXX", DIR => $dir);
- my $v = $ibx->{version} ||= 1;
- my @cmds;
- if ($v == 1) {
- push @cmds, [@$cmd, $old, $new];
+ my $reshard = $opt->{reshard};
+ if (defined $reshard && $reshard <= 0) {
+ die "--reshard must be a positive number\n";
+ }
+
+ # we want temporary directories to be as deep as possible,
+ # so v2 shards can keep "xap$SCHEMA_VERSION" on a separate FS.
+ if ($ibx->version == 1) {
+ if (defined $reshard) {
+ warn
+"--reshard=$reshard ignored for v1 $ibx->{inboxdir}\n";
+ }
+ my $dir = dirname($old);
+ same_fs_or_die($dir, $old);
+ my $v = PublicInbox::Search::SCHEMA_VERSION();
+ my $wip = File::Temp->newdir("xapian$v-XXXXXXXX", DIR => $dir);
+ $tmp->{$old} = $wip;
+ push @queue, [ $old, $wip ];
} else {
opendir my $dh, $old or die "Failed to opendir $old: $!\n";
+ my @old_shards;
while (defined(my $dn = readdir($dh))) {
- if ($dn =~ /\A\d+\z/) {
- push @cmds, [@$cmd, "$old/$dn", "$new/$dn"];
+ if ($dn =~ /\A[0-9]+\z/) {
+ push @old_shards, $dn;
} elsif ($dn eq '.' || $dn eq '..') {
} elsif ($dn =~ /\Aover\.sqlite3/) {
} else {
warn "W: skipping unknown dir: $old/$dn\n"
}
}
- die "No Xapian parts found in $old\n" unless @cmds;
+ die "No Xapian shards found in $old\n" unless @old_shards;
+
+ my ($src, $max_shard);
+ if (!defined($reshard) || $reshard == scalar(@old_shards)) {
+ # 1:1 copy
+ $max_shard = scalar(@old_shards) - 1;
+ } else {
+ # M:N copy
+ $max_shard = $reshard - 1;
+ $src = [ map { "$old/$_" } @old_shards ];
+ }
+ foreach my $dn (0..$max_shard) {
+ my $tmpl = "$dn-XXXXXXXX";
+ my $wip = File::Temp->newdir($tmpl, DIR => $old);
+ same_fs_or_die($old, $wip->dirname);
+ my $cur = "$old/$dn";
+ push @queue, [ $src // $cur , $wip ];
+ $tmp->{$cur} = $wip;
+ }
+ # mark old shards to be unlinked
+ if ($src) {
+ $tmp->{$_} ||= undef for @$src;
+ }
}
- my $im = $ibx->importer(0);
- my $max = $opt->{jobs} || scalar(@cmds);
+ ($tmp, \@queue);
+}
+
+sub check_compact () { runnable_or_die($XAPIAN_COMPACT) }
+
+sub run {
+ my ($ibx, $task, $opt) = @_; # task = 'cpdb' or 'compact'
+ my $cb = \&${\"PublicInbox::Xapcmd::$task"};
+ PublicInbox::Admin::progress_prepare($opt ||= {});
+ defined(my $dir = $ibx->{inboxdir}) or die "no inboxdir defined\n";
+ -d $dir or die "inboxdir=$dir does not exist\n";
+ check_compact() if $opt->{compact};
+ my $reindex; # v1:{ from => $x40 }, v2:{ from => [ $x40, $x40, .. ] } }
+
+ if (!$opt->{-coarse_lock}) {
+ $reindex = $opt->{reindex} = {};
+ $reindex->{from} = []; # per-epoch ranges
+ require PublicInbox::SearchIdx;
+ PublicInbox::SearchIdx::load_xapian_writable();
+ }
+
+ local %SIG = %SIG;
+ setup_signals();
+ $ibx->umask_prepare;
$ibx->with_umask(sub {
+ my $im = $ibx->importer(0);
$im->lock_acquire;
+ my ($tmp, $queue) = prepare_run($ibx, $opt);
# fine-grained locking if we prepare for reindex
- if ($reindex) {
- prepare_reindex($ibx, $reindex);
+ if (!$opt->{-coarse_lock}) {
+ prepare_reindex($ibx, $im, $reindex);
$im->lock_release;
}
- delete($ibx->{$_}) for (qw(mm over search)); # cleanup
- my %pids;
- while (@cmds) {
- while (scalar(keys(%pids)) < $max && scalar(@cmds)) {
- my $x = shift @cmds;
- $pids{xspawn($x, $env, $opt)} = $x;
- }
- while (scalar keys %pids) {
- my $pid = waitpid(-1, 0);
- my $x = delete $pids{$pid};
- die join(' ', @$x)." failed: $?\n" if $?;
- }
- }
- commit_changes($ibx, $old, $new, $opt);
+ $ibx->cleanup;
+ process_queue($queue, $cb, $opt);
+ $im->lock_acquire if !$opt->{-coarse_lock};
+ commit_changes($ibx, $im, $tmp, $opt);
});
}
sub cpdb_retryable ($$) {
- my ($src, $err) = @_;
- if (ref($err) eq 'Search::Xapian::DatabaseModifiedError') {
- warn "$err, reopening and retrying\n";
+ my ($src, $pfx) = @_;
+ if (ref($@) =~ /\bDatabaseModifiedError\b/) {
+ warn "$pfx Xapian DB modified, reopening and retrying\n";
$src->reopen;
return 1;
}
- die $err if $err;
+ if ($@) {
+ warn "$pfx E: ", ref($@), "\n";
+ die;
+ }
0;
}
-# Like copydatabase(1), this is horribly slow; and it doesn't seem due
-# to the overhead of Perl.
-sub cpdb {
- my ($args, $env, $opt) = @_;
- my ($old, $new) = @$args;
- my $src = Search::Xapian::Database->new($old);
- my $tmp = $opt->{compact} ? "$new.compact" : $new;
+sub progress_pfx ($) {
+ my ($wip) = @_; # tempdir v2: ([0-9])+-XXXXXXXX
+ my @p = split('/', $wip);
- # like copydatabase(1), be sure we don't overwrite anything in case
- # of other bugs:
- my $creat = Search::Xapian::DB_CREATE();
- my $dst = Search::Xapian::WritableDatabase->new($tmp, $creat);
- my ($it, $end);
- my ($pfx, $nr, $tot, $fmt); # progress output
+ # return "xap15/0" for v2, or "xapian15" for v1:
+ ($p[-1] =~ /\A([0-9]+)/) ? "$p[-2]/$1" : $p[-1];
+}
+# xapian-compact wrapper
+sub compact ($$) {
+ my ($args, $opt) = @_;
+ my ($src, $newdir) = @$args;
+ my $dst = ref($newdir) ? $newdir->dirname : $newdir;
+ my $pfx = $opt->{-progress_pfx} ||= progress_pfx($src);
+ my $pr = $opt->{-progress};
+ my $rdr = {};
+
+ foreach my $fd (0..2) {
+ defined(my $dfd = $opt->{$fd}) or next;
+ $rdr->{$fd} = $dfd;
+ }
+
+ # we rely on --no-renumber to keep docids synched to NNTP
+ my $cmd = [ $XAPIAN_COMPACT, '--no-renumber' ];
+ for my $sw (qw(no-full fuller)) {
+ push @$cmd, "--$sw" if $opt->{$sw};
+ }
+ for my $sw (qw(blocksize)) {
+ defined(my $v = $opt->{$sw}) or next;
+ push @$cmd, "--$sw", $v;
+ }
+ $pr->("$pfx `".join(' ', @$cmd)."'\n") if $pr;
+ push @$cmd, $src, $dst;
+ my $rd = popen_rd($cmd, undef, $rdr);
+ while (<$rd>) {
+ if ($pr) {
+ s/\r/\r$pfx /g;
+ $pr->("$pfx $_");
+ }
+ }
+	close $rd or die join(' ', @$cmd)." failed: $?\n";
+}
+
+sub cpdb_loop ($$$;$$) {
+ my ($src, $dst, $pr_data, $cur_shard, $reshard) = @_;
+ my ($pr, $fmt, $nr, $pfx);
+ if ($pr_data) {
+ $pr = $pr_data->{pr};
+ $fmt = $pr_data->{fmt};
+ $nr = \($pr_data->{nr});
+ $pfx = $pr_data->{pfx};
+ }
+
+ my ($it, $end);
do {
eval {
- # update the only metadata key for v1:
- my $lc = $src->get_metadata('last_commit');
- $dst->set_metadata('last_commit', $lc) if $lc;
-
$it = $src->postlist_begin('');
$end = $src->postlist_end('');
- if ($opt->{-progress}) {
- $nr = 0;
- $pfx = (split('/', $old))[-1].':';
- $tot = $src->get_doccount;
- $fmt = "$pfx % ".length($tot)."u/$tot\n";
- warn "$pfx copying $tot documents\n";
- }
};
- } while (cpdb_retryable($src, $@));
+ } while (cpdb_retryable($src, $pfx));
do {
eval {
- while ($it != $end) {
+ for (; $it != $end; $it++) {
my $docid = $it->get_docid;
+ if (defined $reshard) {
+ my $dst_shard = $docid % $reshard;
+ next if $dst_shard != $cur_shard;
+ }
my $doc = $src->get_document($docid);
$dst->replace_document($docid, $doc);
- $it->inc;
- if ($fmt && !(++$nr & 1023)) {
- warn(sprintf($fmt, $nr));
+ if ($pr_data && !(++$$nr & 1023)) {
+ $pr->(sprintf($fmt, $$nr));
}
}
# the Perl APIs don't expose iterators for them
# (and public-inbox does not use those features)
};
- } while (cpdb_retryable($src, $@));
-
- warn(sprintf($fmt, $nr)) if $fmt;
- return unless $opt->{compact};
+ } while (cpdb_retryable($src, $pfx));
+}
- $src = $dst = undef; # flushes and closes
+# Like copydatabase(1), this is horribly slow; and it doesn't seem due
+# to the overhead of Perl.
+sub cpdb ($$) {
+ my ($args, $opt) = @_;
+ my ($old, $newdir) = @$args;
+ my $new = $newdir->dirname;
+ my ($src, $cur_shard);
+ my $reshard;
+ PublicInbox::SearchIdx::load_xapian_writable() or die;
+ my $XapianDatabase = $PublicInbox::Search::X{Database};
+ if (ref($old) eq 'ARRAY') {
+ ($cur_shard) = ($new =~ m!xap[0-9]+/([0-9]+)\b!);
+ defined $cur_shard or
+ die "BUG: could not extract shard # from $new";
+ $reshard = $opt->{reshard};
+ defined $reshard or die 'BUG: got array src w/o --reshard';
- warn "$pfx compacting...\n" if $pfx;
- # this is probably the best place to do xapian-compact
- # since $dst isn't readable by HTTP or NNTP clients, yet:
- my $cmd = [ $XAPIAN_COMPACT, '--no-renumber', $tmp, $new ];
- my $rdr = {};
- foreach my $fd (0..2) {
- defined(my $dst = $opt->{$fd}) or next;
- $rdr->{$fd} = $dst;
+ # resharding, M:N copy means have full read access
+ foreach (@$old) {
+ if ($src) {
+ my $sub = $XapianDatabase->new($_);
+ $src->add_database($sub);
+ } else {
+ $src = $XapianDatabase->new($_);
+ }
+ }
+ } else {
+ $src = $XapianDatabase->new($old);
}
- my ($r, $w);
- if ($pfx && pipe($r, $w)) {
- $rdr->{1} = fileno($w);
+ my ($tmp, $ft);
+ local %SIG = %SIG;
+ if ($opt->{compact}) {
+ my $dir = dirname($new);
+ same_fs_or_die($dir, $new);
+ $ft = File::Temp->newdir("$new.compact-XXXXXX", DIR => $dir);
+ setup_signals();
+ $tmp = $ft->dirname;
+ } else {
+ $tmp = $new;
}
- my $pid = spawn($cmd, $env, $rdr);
- if ($pfx) {
- close $w or die "close: \$w: $!";
- foreach (<$r>) {
- s/\r/\r$pfx /g;
- warn "$pfx $_";
+
+ # like copydatabase(1), be sure we don't overwrite anything in case
+ # of other bugs:
+ my $creat = eval($PublicInbox::Search::Xap.'::DB_CREATE()');
+ die if $@;
+ my $XapianWritableDatabase = $PublicInbox::Search::X{WritableDatabase};
+ my $dst = $XapianWritableDatabase->new($tmp, $creat);
+ my $pr = $opt->{-progress};
+ my $pfx = $opt->{-progress_pfx} = progress_pfx($new);
+	my $pr_data = $pr ? { pr => $pr, pfx => $pfx, nr => 0 } : undef;
+
+ do {
+ eval {
+ # update the only metadata key for v1:
+ my $lc = $src->get_metadata('last_commit');
+ $dst->set_metadata('last_commit', $lc) if $lc;
+
+ # only the first xapian shard (0) gets 'indexlevel'
+ if ($new =~ m!(?:xapian[0-9]+|xap[0-9]+/0)\b!) {
+ my $l = $src->get_metadata('indexlevel');
+ if ($l eq 'medium') {
+ $dst->set_metadata('indexlevel', $l);
+ }
+ }
+ if ($pr_data) {
+ my $tot = $src->get_doccount;
+
+ # we can only estimate when resharding,
+ # because removed spam causes slight imbalance
+ my $est = '';
+ if (defined $cur_shard && $reshard > 1) {
+ $tot = int($tot/$reshard);
+ $est = 'around ';
+ }
+ my $fmt = "$pfx % ".length($tot)."u/$tot\n";
+ $pr->("$pfx copying $est$tot documents\n");
+ $pr_data->{fmt} = $fmt;
+ $pr_data->{total} = $tot;
+ }
+ };
+ } while (cpdb_retryable($src, $pfx));
+
+ if (defined $reshard) {
+ # we rely on document IDs matching NNTP article number,
+ # so we can't have the Xapian sharding DB support rewriting
+ # document IDs. Thus we iterate through each shard
+ # individually.
+ $src = undef;
+ foreach (@$old) {
+ my $old = $XapianDatabase->new($_);
+ cpdb_loop($old, $dst, $pr_data, $cur_shard, $reshard);
}
+ } else {
+ cpdb_loop($src, $dst, $pr_data);
}
- my $rp = waitpid($pid, 0);
- if ($? || $rp != $pid) {
- die join(' ', @$cmd)." failed: $? (pid=$pid, reaped=$rp)\n";
- }
+
+ $pr->(sprintf($pr_data->{fmt}, $pr_data->{nr})) if $pr;
+ return unless $opt->{compact};
+
+ $src = $dst = undef; # flushes and closes
+
+ # this is probably the best place to do xapian-compact
+ # since $dst isn't readable by HTTP or NNTP clients, yet:
+ compact([ $tmp, $new ], $opt);
remove_tree($tmp) or die "failed to remove $tmp: $!\n";
}