diff --git a/lib/PublicInbox/Xapcmd.pm b/lib/PublicInbox/Xapcmd.pm
index 06389dd0..19c6ff07 100644
--- a/lib/PublicInbox/Xapcmd.pm
+++ b/lib/PublicInbox/Xapcmd.pm
@@ -3,64 +3,91 @@
 package PublicInbox::Xapcmd;
 use strict;
 use warnings;
-use PublicInbox::Spawn qw(which spawn);
+use PublicInbox::Spawn qw(which popen_rd);
 use PublicInbox::Over;
-use PublicInbox::Search;
-use File::Temp qw(tempdir);
+use PublicInbox::SearchIdx;
+use File::Temp 0.19 (); # ->newdir
 use File::Path qw(remove_tree);
 use File::Basename qw(dirname);
+use POSIX ();
 
 # support testing with dev versions of Xapian which installs
 # commands with a version number suffix (e.g. "xapian-compact-1.5")
 our $XAPIAN_COMPACT = $ENV{XAPIAN_COMPACT} || 'xapian-compact';
+our @COMPACT_OPT = qw(jobs|j=i quiet|q blocksize|b=s no-full|n fuller|F);
 
-sub commit_changes ($$$) {
-	my ($ibx, $tmp, $opt) = @_;
-
+sub commit_changes ($$$$) {
+	my ($ibx, $im, $tmp, $opt) = @_;
+	my $reshard = $opt->{reshard};
 	my $reindex = $opt->{reindex};
-	my $im = $ibx->importer(0);
-	$im->lock_acquire if $reindex;
 
-	while (my ($old, $new) = each %$tmp) {
-		my @st = stat($old) or die "failed to stat($old): $!\n";
+	$SIG{INT} or die 'BUG: $SIG{INT} not handled';
+	my @old_shard;
+
+	while (my ($old, $newdir) = each %$tmp) {
+		next if $old eq ''; # no invalid paths
+		my @st = stat($old);
+		if (!@st && !defined($opt->{reshard})) {
+			die "failed to stat($old): $!";
+		}
+		my $new = $newdir->dirname if defined($newdir);
 		my $over = "$old/over.sqlite3";
 		if (-f $over) { # only for v1, v2 over is untouched
+			defined $new or die "BUG: $over exists when culling v2";
 			$over = PublicInbox::Over->new($over);
 			my $tmp_over = "$new/over.sqlite3";
 			$over->connect->sqlite_backup_to_file($tmp_over);
 			$over = undef;
 		}
 
-		chmod($st[2] & 07777, $new) or die "chmod $old: $!\n";
+		if (!defined($new)) { # culled shard
+			push @old_shard, $old;
+			next;
+		}
+
+		if (@st) {
+			chmod($st[2] & 07777, $new) or die "chmod $old: $!\n";
+			rename($old, "$new/old") or
+				die "rename $old => $new/old: $!\n";
+		}
 		# Xtmpdir->DESTROY won't remove $new after this:
-		rename($old, "$new/old") or die "rename $old => $new/old: $!\n";
 		rename($new, $old) or die "rename $new => $old: $!\n";
-		my $prev = "$old/old";
-		remove_tree($prev) or die "failed to remove $prev: $!\n";
+		if (@st) {
+			my $prev = "$old/old";
+			remove_tree($prev) or
+				die "failed to remove $prev: $!\n";
+		}
 	}
-	$tmp->done;
-	if ($reindex) {
+	remove_tree(@old_shard);
+	$tmp = undef;
+	if (!$opt->{-coarse_lock}) {
 		$opt->{-skip_lock} = 1;
-		PublicInbox::Admin::index_inbox($ibx, $opt);
-		# implicit lock_release
-	} else {
-		$im->lock_release;
+
+		if ($im->can('count_shards')) {
+			my $pr = $opt->{-progress};
+			my $n = $im->count_shards;
+			if (defined $reshard && $n != $reshard) {
+				die
+"BUG: counted $n shards after resharding to $reshard";
+			}
+			my $prev = $im->{shards};
+			if ($pr && $prev != $n) {
+				$pr->("shard count changed: $prev => $n\n");
+				$im->{shards} = $n;
+			}
+		}
+
+		PublicInbox::Admin::index_inbox($ibx, $im, $opt);
 	}
 }
 
-sub xspawn {
-	my ($cmd, $env, $opt) = @_;
-	if (ref($cmd->[0]) eq 'CODE') {
-		my $cb = shift(@$cmd); # $cb = cpdb()
-		defined(my $pid = fork) or die "fork: $!";
-		return $pid if $pid > 0;
-		eval { $cb->($cmd, $env, $opt) };
-		die $@ if $@;
exit 0; - } else { - spawn($cmd, $env, $opt); - } +sub cb_spawn { + my ($cb, $args, $opt) = @_; # $cb = cpdb() or compact() + defined(my $pid = fork) or die "fork: $!"; + return $pid if $pid > 0; + $cb->($args, $opt); + POSIX::_exit(0); } sub runnable_or_die ($) { @@ -68,18 +95,17 @@ sub runnable_or_die ($) { which($exe) or die "$exe not found in PATH\n"; } -sub prepare_reindex ($$) { - my ($ibx, $reindex) = @_; - if ($ibx->{version} == 1) { +sub prepare_reindex ($$$) { + my ($ibx, $im, $reindex) = @_; + if ($ibx->version == 1) { my $dir = $ibx->search->xdir(1); - my $xdb = Search::Xapian::Database->new($dir); + my $xdb = $PublicInbox::Search::X{Database}->new($dir); if (my $lc = $xdb->get_metadata('last_commit')) { $reindex->{from} = $lc; } } else { # v2 - my $v2w = $ibx->importer(0); my $max; - $v2w->git_dir_latest(\$max) or return; + $im->git_dir_latest(\$max) or return; my $from = $reindex->{from}; my $mm = $ibx->mm; my $v = PublicInbox::Search::SCHEMA_VERSION(); @@ -89,105 +115,143 @@ sub prepare_reindex ($$) { } } -sub progress_prepare ($) { - my ($opt) = @_; - if ($opt->{quiet}) { - open my $null, '>', '/dev/null' or - die "failed to open /dev/null: $!\n"; - $opt->{1} = fileno($null); - $opt->{-dev_null} = $null; - } else { - $opt->{-progress} = sub { print STDERR @_ }; - } -} - sub same_fs_or_die ($$) { my ($x, $y) = @_; return if ((stat($x))[0] == (stat($y))[0]); # 0 - st_dev die "$x and $y reside on different filesystems\n"; } +sub process_queue { + my ($queue, $cb, $max, $opt) = @_; + if ($max <= 1) { + while (defined(my $args = shift @$queue)) { + $cb->($args, $opt); + } + return; + } + + # run in parallel: + my %pids; + while (@$queue) { + while (scalar(keys(%pids)) < $max && scalar(@$queue)) { + my $args = shift @$queue; + $pids{cb_spawn($cb, $args, $opt)} = $args; + } + + while (scalar keys %pids) { + my $pid = waitpid(-1, 0); + my $args = delete $pids{$pid}; + die join(' ', @$args)." failed: $?\n" if $?; + } + } +} + +sub setup_signals () { + # http://www.tldp.org/LDP/abs/html/exitcodes.html + $SIG{INT} = sub { exit(130) }; + $SIG{HUP} = $SIG{PIPE} = $SIG{TERM} = sub { exit(1) }; +} + sub run { - my ($ibx, $cmd, $env, $opt) = @_; - progress_prepare($opt ||= {}); - my $dir = $ibx->{mainrepo} or die "no mainrepo in inbox\n"; - my $exe = $cmd->[0]; + my ($ibx, $task, $opt) = @_; # task = 'cpdb' or 'compact' + my $cb = \&${\"PublicInbox::Xapcmd::$task"}; + PublicInbox::Admin::progress_prepare($opt ||= {}); + my $dir = $ibx->{inboxdir} or die "no inboxdir in inbox\n"; runnable_or_die($XAPIAN_COMPACT) if $opt->{compact}; - my $reindex; # v1:{ from => $x40 }, v2:{ from => [ $x40, $x40, .. ] } } my $from; # per-epoch ranges - if (ref($exe) eq 'CODE') { + if (!$opt->{-coarse_lock}) { $reindex = $opt->{reindex} = {}; $from = $reindex->{from} = []; - require Search::Xapian::WritableDatabase; - } else { - runnable_or_die($exe); + require PublicInbox::SearchIdx; + PublicInbox::SearchIdx::load_xapian_writable(); } + $ibx->umask_prepare; my $old = $ibx->search->xdir(1); -d $old or die "$old does not exist\n"; - my $tmp = PublicInbox::Xtmpdirs->new; - my $v = $ibx->{version} ||= 1; - my @cmds; + my $tmp = {}; + my @q; + my $reshard = $opt->{reshard}; + if (defined $reshard && $reshard <= 0) { + die "--reshard must be a positive number\n"; + } + + local %SIG = %SIG; + setup_signals(); # we want temporary directories to be as deep as possible, - # so v2 partitions can keep "xap$SCHEMA_VERSION" on a separate FS. 
- if ($v == 1) { - my $old_parent = dirname($old); - same_fs_or_die($old_parent, $old); - $tmp->{$old} = tempdir('xapcmd-XXXXXXXX', DIR => $old_parent); - push @cmds, [ @$cmd, $old, $tmp->{$old} ]; + # so v2 shards can keep "xap$SCHEMA_VERSION" on a separate FS. + if ($ibx->version == 1) { + if (defined $reshard) { + warn +"--reshard=$reshard ignored for v1 $ibx->{inboxdir}\n"; + } + my $dir = dirname($old); + same_fs_or_die($dir, $old); + my $v = PublicInbox::Search::SCHEMA_VERSION(); + my $wip = File::Temp->newdir("xapian$v-XXXXXXXX", DIR => $dir); + $tmp->{$old} = $wip; + push @q, [ $old, $wip ]; } else { opendir my $dh, $old or die "Failed to opendir $old: $!\n"; + my @old_shards; while (defined(my $dn = readdir($dh))) { - if ($dn =~ /\A\d+\z/) { - my $tmpl = "$dn-XXXXXXXX"; - my $dst = tempdir($tmpl, DIR => $old); - same_fs_or_die($old, $dst); - my $cur = "$old/$dn"; - push @cmds, [@$cmd, $cur, $dst ]; - $tmp->{$cur} = $dst; + if ($dn =~ /\A[0-9]+\z/) { + push @old_shards, $dn; } elsif ($dn eq '.' || $dn eq '..') { } elsif ($dn =~ /\Aover\.sqlite3/) { } else { warn "W: skipping unknown dir: $old/$dn\n" } } - die "No Xapian parts found in $old\n" unless @cmds; + die "No Xapian shards found in $old\n" unless @old_shards; + + my ($src, $max_shard); + if (!defined($reshard) || $reshard == scalar(@old_shards)) { + # 1:1 copy + $max_shard = scalar(@old_shards) - 1; + } else { + # M:N copy + $max_shard = $reshard - 1; + $src = [ map { "$old/$_" } @old_shards ]; + } + foreach my $dn (0..$max_shard) { + my $tmpl = "$dn-XXXXXXXX"; + my $wip = File::Temp->newdir($tmpl, DIR => $old); + same_fs_or_die($old, $wip->dirname); + my $cur = "$old/$dn"; + push @q, [ $src // $cur , $wip ]; + $tmp->{$cur} = $wip; + } + # mark old shards to be unlinked + if ($src) { + $tmp->{$_} ||= undef for @$src; + } } - my $im = $ibx->importer(0); - my $max = $opt->{jobs} || scalar(@cmds); + my $max = $opt->{jobs} || scalar(@q); $ibx->with_umask(sub { + my $im = $ibx->importer(0); $im->lock_acquire; # fine-grained locking if we prepare for reindex - if ($reindex) { - prepare_reindex($ibx, $reindex); + if (!$opt->{-coarse_lock}) { + prepare_reindex($ibx, $im, $reindex); $im->lock_release; } - delete($ibx->{$_}) for (qw(mm over search)); # cleanup - my %pids; - while (@cmds) { - while (scalar(keys(%pids)) < $max && scalar(@cmds)) { - my $x = shift @cmds; - $pids{xspawn($x, $env, $opt)} = $x; - } - while (scalar keys %pids) { - my $pid = waitpid(-1, 0); - my $x = delete $pids{$pid}; - die join(' ', @$x)." failed: $?\n" if $?; - } - } - commit_changes($ibx, $tmp, $opt); + $ibx->cleanup; + process_queue(\@q, $cb, $max, $opt); + $im->lock_acquire if !$opt->{-coarse_lock}; + commit_changes($ibx, $im, $tmp, $opt); }); } sub cpdb_retryable ($$) { my ($src, $pfx) = @_; - if (ref($@) eq 'Search::Xapian::DatabaseModifiedError') { + if (ref($@) =~ /\bDatabaseModifiedError\b/) { warn "$pfx Xapian DB modified, reopening and retrying\n"; $src->reopen; return 1; @@ -199,51 +263,79 @@ sub cpdb_retryable ($$) { 0; } -# Like copydatabase(1), this is horribly slow; and it doesn't seem due -# to the overhead of Perl. -sub cpdb { - my ($args, $env, $opt) = @_; - my ($old, $new) = @$args; - my $src = Search::Xapian::Database->new($old); - my $tmp = $opt->{compact} ? 
"$new.compact" : $new; +sub progress_pfx ($) { + my ($wip) = @_; # tempdir v2: ([0-9])+-XXXXXXXX + my @p = split('/', $wip); - # like copydatabase(1), be sure we don't overwrite anything in case - # of other bugs: - my $creat = Search::Xapian::DB_CREATE(); - my $dst = Search::Xapian::WritableDatabase->new($tmp, $creat); - my ($it, $end); - my $pfx = ''; - my ($nr, $tot, $fmt); # progress output + # return "xap15/0" for v2, or "xapian15" for v1: + ($p[-1] =~ /\A([0-9]+)/) ? "$p[-2]/$1" : $p[-1]; +} + +# xapian-compact wrapper +sub compact ($$) { + my ($args, $opt) = @_; + my ($src, $newdir) = @$args; + my $dst = ref($newdir) ? $newdir->dirname : $newdir; + my $pfx = $opt->{-progress_pfx} ||= progress_pfx($src); my $pr = $opt->{-progress}; + my $rdr = {}; + + foreach my $fd (0..2) { + defined(my $dfd = $opt->{$fd}) or next; + $rdr->{$fd} = $dfd; + } + + # we rely on --no-renumber to keep docids synched to NNTP + my $cmd = [ $XAPIAN_COMPACT, '--no-renumber' ]; + for my $sw (qw(no-full fuller)) { + push @$cmd, "--$sw" if $opt->{$sw}; + } + for my $sw (qw(blocksize)) { + defined(my $v = $opt->{$sw}) or next; + push @$cmd, "--$sw", $v; + } + $pr->("$pfx `".join(' ', @$cmd)."'\n") if $pr; + push @$cmd, $src, $dst; + my $rd = popen_rd($cmd, undef, $rdr); + while (<$rd>) { + if ($pr) { + s/\r/\r$pfx /g; + $pr->("$pfx $_"); + } + } + close $rd or die join(' ', @$cmd)." failed: $?n"; +} +sub cpdb_loop ($$$;$$) { + my ($src, $dst, $pr_data, $cur_shard, $reshard) = @_; + my ($pr, $fmt, $nr, $pfx); + if ($pr_data) { + $pr = $pr_data->{pr}; + $fmt = $pr_data->{fmt}; + $nr = \($pr_data->{nr}); + $pfx = $pr_data->{pfx}; + } + + my ($it, $end); do { eval { - # update the only metadata key for v1: - my $lc = $src->get_metadata('last_commit'); - $dst->set_metadata('last_commit', $lc) if $lc; - $it = $src->postlist_begin(''); $end = $src->postlist_end(''); - if ($pr) { - $nr = 0; - $tot = $src->get_doccount; - my @p = split('/', $old); - $pfx = "$p[-2]/$p[-1]:"; - $fmt = "$pfx % ".length($tot)."u/$tot\n"; - $pr->("$pfx copying $tot documents\n"); - } }; } while (cpdb_retryable($src, $pfx)); do { eval { - while ($it != $end) { + for (; $it != $end; $it++) { my $docid = $it->get_docid; + if (defined $reshard) { + my $dst_shard = $docid % $reshard; + next if $dst_shard != $cur_shard; + } my $doc = $src->get_document($docid); $dst->replace_document($docid, $doc); - $it->inc; - if ($pr && !(++$nr & 1023)) { - $pr->(sprintf($fmt, $nr)); + if ($pr_data && !(++$$nr & 1023)) { + $pr->(sprintf($fmt, $$nr)); } } @@ -253,72 +345,114 @@ sub cpdb { # (and public-inbox does not use those features) }; } while (cpdb_retryable($src, $pfx)); +} - $pr->(sprintf($fmt, $nr)) if $pr; - return unless $opt->{compact}; - - $src = $dst = undef; # flushes and closes - - $pr->("$pfx compacting...\n") if $pr; - # this is probably the best place to do xapian-compact - # since $dst isn't readable by HTTP or NNTP clients, yet: - my $cmd = [ $XAPIAN_COMPACT, '--no-renumber', $tmp, $new ]; - my $rdr = {}; - foreach my $fd (0..2) { - defined(my $dst = $opt->{$fd}) or next; - $rdr->{$fd} = $dst; - } - - my ($r, $w); - if ($pr && pipe($r, $w)) { - $rdr->{1} = fileno($w); - } - my $pid = spawn($cmd, $env, $rdr); - if ($pr) { - close $w or die "close: \$w: $!"; - foreach (<$r>) { - s/\r/\r$pfx /g; - $pr->("$pfx $_"); +# Like copydatabase(1), this is horribly slow; and it doesn't seem due +# to the overhead of Perl. 
+sub cpdb ($$) { + my ($args, $opt) = @_; + my ($old, $newdir) = @$args; + my $new = $newdir->dirname; + my ($src, $cur_shard); + my $reshard; + PublicInbox::SearchIdx::load_xapian_writable() or die; + my $XapianDatabase = $PublicInbox::Search::X{Database}; + if (ref($old) eq 'ARRAY') { + ($cur_shard) = ($new =~ m!xap[0-9]+/([0-9]+)\b!); + defined $cur_shard or + die "BUG: could not extract shard # from $new"; + $reshard = $opt->{reshard}; + defined $reshard or die 'BUG: got array src w/o --reshard'; + + # resharding, M:N copy means have full read access + foreach (@$old) { + if ($src) { + my $sub = $XapianDatabase->new($_); + $src->add_database($sub); + } else { + $src = $XapianDatabase->new($_); + } } + } else { + $src = $XapianDatabase->new($old); } - my $rp = waitpid($pid, 0); - if ($? || $rp != $pid) { - die join(' ', @$cmd)." failed: $? (pid=$pid, reaped=$rp)\n"; + + my ($tmp, $ft); + local %SIG = %SIG; + if ($opt->{compact}) { + my $dir = dirname($new); + same_fs_or_die($dir, $new); + $ft = File::Temp->newdir("$new.compact-XXXXXX", DIR => $dir); + setup_signals(); + $tmp = $ft->dirname; + } else { + $tmp = $new; } - remove_tree($tmp) or die "failed to remove $tmp: $!\n"; -} -# slightly easier-to-manage manage than END{} blocks -package PublicInbox::Xtmpdirs; -use strict; -use warnings; -use File::Path qw(remove_tree); -my %owner; + # like copydatabase(1), be sure we don't overwrite anything in case + # of other bugs: + my $creat = eval($PublicInbox::Search::Xap.'::DB_CREATE()'); + die if $@; + my $XapianWritableDatabase = $PublicInbox::Search::X{WritableDatabase}; + my $dst = $XapianWritableDatabase->new($tmp, $creat); + my $pr = $opt->{-progress}; + my $pfx = $opt->{-progress_pfx} = progress_pfx($new); + my $pr_data = { pr => $pr, pfx => $pfx, nr => 0 } if $pr; -sub new { - # http://www.tldp.org/LDP/abs/html/exitcodes.html - $SIG{INT} = sub { exit(130) }; - $SIG{HUP} = $SIG{PIPE} = $SIG{TERM} = sub { exit(1) }; - my $self = bless {}, $_[0]; # old partition => new (tmp) partition - $owner{"$self"} = $$; - $self; -} + do { + eval { + # update the only metadata key for v1: + my $lc = $src->get_metadata('last_commit'); + $dst->set_metadata('last_commit', $lc) if $lc; -sub done { - my ($self) = @_; - delete $owner{"$self"}; - $SIG{INT} = $SIG{HUP} = $SIG{PIPE} = $SIG{TERM} = 'DEFAULT'; - %$self = (); -} + # only the first xapian shard (0) gets 'indexlevel' + if ($new =~ m!(?:xapian[0-9]+|xap[0-9]+/0)\b!) { + my $l = $src->get_metadata('indexlevel'); + if ($l eq 'medium') { + $dst->set_metadata('indexlevel', $l); + } + } + if ($pr_data) { + my $tot = $src->get_doccount; + + # we can only estimate when resharding, + # because removed spam causes slight imbalance + my $est = ''; + if (defined $cur_shard && $reshard > 1) { + $tot = int($tot/$reshard); + $est = 'around '; + } + my $fmt = "$pfx % ".length($tot)."u/$tot\n"; + $pr->("$pfx copying $est$tot documents\n"); + $pr_data->{fmt} = $fmt; + $pr_data->{total} = $tot; + } + }; + } while (cpdb_retryable($src, $pfx)); -sub DESTROY { - my ($self) = @_; - my $owner_pid = delete $owner{"$self"} or return; - return if $owner_pid != $$; - foreach my $new (values %$self) { - remove_tree($new) unless -d "$new/old"; + if (defined $reshard) { + # we rely on document IDs matching NNTP article number, + # so we can't have the Xapian sharding DB support rewriting + # document IDs. Thus we iterate through each shard + # individually. 
+		$src = undef;
+		foreach (@$old) {
+			my $old = $XapianDatabase->new($_);
+			cpdb_loop($old, $dst, $pr_data, $cur_shard, $reshard);
+		}
+	} else {
+		cpdb_loop($src, $dst, $pr_data);
 	}
-	$SIG{INT} = $SIG{HUP} = $SIG{PIPE} = $SIG{TERM} = 'DEFAULT';
+
+	$pr->(sprintf($pr_data->{fmt}, $pr_data->{nr})) if $pr;
+	return unless $opt->{compact};
+
+	$src = $dst = undef; # flushes and closes
+
+	# this is probably the best place to do xapian-compact
+	# since $dst isn't readable by HTTP or NNTP clients, yet:
+	compact([ $tmp, $new ], $opt);
+	remove_tree($tmp) or die "failed to remove $tmp: $!\n";
 }
 1;
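
A minimal standalone sketch (example values only, not part of the patch) of the shard-selection rule cpdb_loop() applies when --reshard is given: document IDs are never rewritten, only the docid-to-shard mapping changes, so docids keep matching NNTP article numbers.

	#!/usr/bin/env perl
	# sketch: how a docid picks its destination shard under --reshard=N
	use strict;
	use warnings;

	my $reshard = 3;	# hypothetical --reshard value
	for my $docid (1..10) {	# hypothetical existing docids
		my $dst_shard = $docid % $reshard;	# same rule as cpdb_loop()
		printf "docid %2d => shard %d\n", $docid, $dst_shard;
	}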
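
The compact() wrapper always passes --no-renumber so Xapian keeps existing document IDs, and forwards the remaining switches from the option hash. A sketch of the argv it ends up building for one hypothetical option set (paths and option values here are made up):

	#!/usr/bin/env perl
	# sketch: the xapian-compact argv assembled by compact() for a given $opt
	use strict;
	use warnings;

	my $opt = { fuller => 1, blocksize => 8192 };	# hypothetical options
	my @cmd = ('xapian-compact', '--no-renumber');	# docids must not change
	for my $sw (qw(no-full fuller)) {
		push @cmd, "--$sw" if $opt->{$sw};
	}
	push @cmd, '--blocksize', $opt->{blocksize} if defined $opt->{blocksize};
	push @cmd, '/path/to/xap15/1', '/path/to/xap15/1-wip';	# src, dst (examples)
	print join(' ', @cmd), "\n";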
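
process_queue() parallelizes the per-shard jobs with plain fork/waitpid; each child runs the callback and leaves via POSIX::_exit(), which skips END blocks and destructors (such as File::Temp cleanup) inherited from the parent. A self-contained sketch of that pattern, with dummy jobs standing in for the real cpdb()/compact() callbacks:

	#!/usr/bin/env perl
	# sketch: the fork/waitpid batching used by process_queue(), with dummy jobs
	use strict;
	use warnings;
	use POSIX ();

	my $max = 2;	# like $opt->{jobs}
	my @queue = map { [ "shard $_" ] } (0..4);	# dummy work items
	my %pids;
	while (@queue) {
		while (keys(%pids) < $max && @queue) {
			my $args = shift @queue;
			defined(my $pid = fork) or die "fork: $!";
			if ($pid == 0) {	# child: do the work
				print "processing @$args\n";
				POSIX::_exit(0);	# skip destructors/END blocks
			}
			$pids{$pid} = $args;	# parent: remember the job
		}
		# drain the whole batch before starting more (as process_queue does)
		while (keys %pids) {
			my $pid = waitpid(-1, 0);
			my $args = delete $pids{$pid};
			die "@$args failed: $?\n" if $?;
		}
	}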