X-Git-Url: http://www.git.stargrave.org/?a=blobdiff_plain;f=lib%2FPublicInbox%2FXapcmd.pm;h=4871378e6314ed6612c1ad91414200df8b40e234;hb=55b707d788ce13696e4411389583e720ea6dab01;hp=e303da9e8ccd339381614e8a85c5f813b93c129e;hpb=a0eabc015e22e51cbf8f6060abafd5b53a0ae72f;p=public-inbox.git diff --git a/lib/PublicInbox/Xapcmd.pm b/lib/PublicInbox/Xapcmd.pm index e303da9e..4871378e 100644 --- a/lib/PublicInbox/Xapcmd.pm +++ b/lib/PublicInbox/Xapcmd.pm @@ -5,32 +5,33 @@ use strict; use warnings; use PublicInbox::Spawn qw(which spawn); use PublicInbox::Over; -use PublicInbox::Search; -use File::Temp qw(tempdir); +use PublicInbox::SearchIdx; +use File::Temp 0.19 (); # ->newdir use File::Path qw(remove_tree); use File::Basename qw(dirname); +use POSIX (); # support testing with dev versions of Xapian which installs # commands with a version number suffix (e.g. "xapian-compact-1.5") our $XAPIAN_COMPACT = $ENV{XAPIAN_COMPACT} || 'xapian-compact'; our @COMPACT_OPT = qw(jobs|j=i quiet|q blocksize|b=s no-full|n fuller|F); -sub commit_changes ($$$) { - my ($ibx, $tmp, $opt) = @_; - my $new_parts = $opt->{reshard}; +sub commit_changes ($$$$) { + my ($ibx, $im, $tmp, $opt) = @_; + my $reshard = $opt->{reshard}; my $reindex = $opt->{reindex}; - my $im = $ibx->importer(0); - $im->lock_acquire if !$opt->{-coarse_lock}; $SIG{INT} or die 'BUG: $SIG{INT} not handled'; - my @old_part; + my @old_shard; - while (my ($old, $new) = each %$tmp) { + while (my ($old, $newdir) = each %$tmp) { + next if $old eq ''; # no invalid paths my @st = stat($old); if (!@st && !defined($opt->{reshard})) { die "failed to stat($old): $!"; } + my $new = $newdir->dirname if defined($newdir); my $over = "$old/over.sqlite3"; if (-f $over) { # only for v1, v2 over is untouched defined $new or die "BUG: $over exists when culling v2"; @@ -40,8 +41,8 @@ sub commit_changes ($$$) { $over = undef; } - if (!defined($new)) { # culled partition - push @old_part, $old; + if (!defined($new)) { # culled shard + push @old_shard, $old; next; } @@ -58,29 +59,26 @@ sub commit_changes ($$$) { die "failed to remove $prev: $!\n"; } } - remove_tree(@old_part); - $tmp->done; + remove_tree(@old_shard); + $tmp = undef; if (!$opt->{-coarse_lock}) { $opt->{-skip_lock} = 1; - if ($im->can('count_partitions')) { + if ($im->can('count_shards')) { my $pr = $opt->{-progress}; - my $n = $im->count_partitions; - if (defined $new_parts && $n != $new_parts) { + my $n = $im->count_shards; + if (defined $reshard && $n != $reshard) { die -"BUG: counted $n shards after resharding to $new_parts"; +"BUG: counted $n shards after resharding to $reshard"; } - my $prev = $im->{partitions}; + my $prev = $im->{shards}; if ($pr && $prev != $n) { $pr->("shard count changed: $prev => $n\n"); - $im->{partitions} = $n; + $im->{shards} = $n; } } - PublicInbox::Admin::index_inbox($ibx, $opt); - # implicit lock_release - } else { - $im->lock_release; + PublicInbox::Admin::index_inbox($ibx, $im, $opt); } } @@ -89,7 +87,7 @@ sub cb_spawn { defined(my $pid = fork) or die "fork: $!"; return $pid if $pid > 0; $cb->($args, $opt); - exit 0; + POSIX::_exit(0); } sub runnable_or_die ($) { @@ -97,18 +95,17 @@ sub runnable_or_die ($) { which($exe) or die "$exe not found in PATH\n"; } -sub prepare_reindex ($$) { - my ($ibx, $reindex) = @_; +sub prepare_reindex ($$$) { + my ($ibx, $im, $reindex) = @_; if ($ibx->{version} == 1) { my $dir = $ibx->search->xdir(1); - my $xdb = Search::Xapian::Database->new($dir); + my $xdb = $PublicInbox::Search::X{Database}->new($dir); if (my $lc = 
$xdb->get_metadata('last_commit')) { $reindex->{from} = $lc; } } else { # v2 - my $v2w = $ibx->importer(0); my $max; - $v2w->git_dir_latest(\$max) or return; + $im->git_dir_latest(\$max) or return; my $from = $reindex->{from}; my $mm = $ibx->mm; my $v = PublicInbox::Search::SCHEMA_VERSION(); @@ -149,11 +146,17 @@ sub process_queue { } } +sub setup_signals () { + # http://www.tldp.org/LDP/abs/html/exitcodes.html + $SIG{INT} = sub { exit(130) }; + $SIG{HUP} = $SIG{PIPE} = $SIG{TERM} = sub { exit(1) }; +} + sub run { my ($ibx, $task, $opt) = @_; # task = 'cpdb' or 'compact' my $cb = \&${\"PublicInbox::Xapcmd::$task"}; PublicInbox::Admin::progress_prepare($opt ||= {}); - my $dir = $ibx->{mainrepo} or die "no mainrepo in inbox\n"; + my $dir = $ibx->{inboxdir} or die "no inboxdir in inbox\n"; runnable_or_die($XAPIAN_COMPACT) if $opt->{compact}; my $reindex; # v1:{ from => $x40 }, v2:{ from => [ $x40, $x40, .. ] } } my $from; # per-epoch ranges @@ -161,90 +164,95 @@ sub run { if (!$opt->{-coarse_lock}) { $reindex = $opt->{reindex} = {}; $from = $reindex->{from} = []; - require Search::Xapian::WritableDatabase; + require PublicInbox::SearchIdx; + PublicInbox::SearchIdx::load_xapian_writable(); } $ibx->umask_prepare; my $old = $ibx->search->xdir(1); -d $old or die "$old does not exist\n"; - my $tmp = PublicInbox::Xtmpdirs->new; + my $tmp = {}; my $v = $ibx->{version} ||= 1; my @q; - my $new_parts = $opt->{reshard}; - if (defined $new_parts && $new_parts <= 0) { + my $reshard = $opt->{reshard}; + if (defined $reshard && $reshard <= 0) { die "--reshard must be a positive number\n"; } + local %SIG = %SIG; + setup_signals(); + # we want temporary directories to be as deep as possible, # so v2 shards can keep "xap$SCHEMA_VERSION" on a separate FS. if ($v == 1) { - if (defined $new_parts) { + if (defined $reshard) { warn -"--reshard=$new_parts ignored for v1 $ibx->{mainrepo}\n"; +"--reshard=$reshard ignored for v1 $ibx->{inboxdir}\n"; } - my $old_parent = dirname($old); - same_fs_or_die($old_parent, $old); + my $dir = dirname($old); + same_fs_or_die($dir, $old); my $v = PublicInbox::Search::SCHEMA_VERSION(); - my $wip = tempdir("xapian$v-XXXXXXXX", DIR => $old_parent); + my $wip = File::Temp->newdir("xapian$v-XXXXXXXX", DIR => $dir); $tmp->{$old} = $wip; push @q, [ $old, $wip ]; } else { opendir my $dh, $old or die "Failed to opendir $old: $!\n"; - my @old_parts; + my @old_shards; while (defined(my $dn = readdir($dh))) { if ($dn =~ /\A[0-9]+\z/) { - push @old_parts, $dn; + push @old_shards, $dn; } elsif ($dn eq '.' 
|| $dn eq '..') { } elsif ($dn =~ /\Aover\.sqlite3/) { } else { warn "W: skipping unknown dir: $old/$dn\n" } } - die "No Xapian parts found in $old\n" unless @old_parts; + die "No Xapian shards found in $old\n" unless @old_shards; - my ($src, $max_part); - if (!defined($new_parts) || $new_parts == scalar(@old_parts)) { + my ($src, $max_shard); + if (!defined($reshard) || $reshard == scalar(@old_shards)) { # 1:1 copy - $max_part = scalar(@old_parts) - 1; + $max_shard = scalar(@old_shards) - 1; } else { # M:N copy - $max_part = $new_parts - 1; - $src = [ map { "$old/$_" } @old_parts ]; + $max_shard = $reshard - 1; + $src = [ map { "$old/$_" } @old_shards ]; } - foreach my $dn (0..$max_part) { + foreach my $dn (0..$max_shard) { my $tmpl = "$dn-XXXXXXXX"; - my $wip = tempdir($tmpl, DIR => $old); - same_fs_or_die($old, $wip); + my $wip = File::Temp->newdir($tmpl, DIR => $old); + same_fs_or_die($old, $wip->dirname); my $cur = "$old/$dn"; push @q, [ $src // $cur , $wip ]; $tmp->{$cur} = $wip; } - # mark old parts to be unlinked + # mark old shards to be unlinked if ($src) { $tmp->{$_} ||= undef for @$src; } } - my $im = $ibx->importer(0); my $max = $opt->{jobs} || scalar(@q); $ibx->with_umask(sub { + my $im = $ibx->importer(0); $im->lock_acquire; # fine-grained locking if we prepare for reindex if (!$opt->{-coarse_lock}) { - prepare_reindex($ibx, $reindex); + prepare_reindex($ibx, $im, $reindex); $im->lock_release; } - delete($ibx->{$_}) for (qw(mm over search)); # cleanup + $ibx->cleanup; process_queue(\@q, $cb, $max, $opt); - commit_changes($ibx, $tmp, $opt); + $im->lock_acquire if !$opt->{-coarse_lock}; + commit_changes($ibx, $im, $tmp, $opt); }); } sub cpdb_retryable ($$) { my ($src, $pfx) = @_; - if (ref($@) eq 'Search::Xapian::DatabaseModifiedError') { + if (ref($@) =~ /\bDatabaseModifiedError\b/) { warn "$pfx Xapian DB modified, reopening and retrying\n"; $src->reopen; return 1; @@ -267,7 +275,8 @@ sub progress_pfx ($) { # xapian-compact wrapper sub compact ($$) { my ($args, $opt) = @_; - my ($src, $dst) = @$args; + my ($src, $newdir) = @$args; + my $dst = ref($newdir) ? $newdir->dirname : $newdir; my ($r, $w); my $pfx = $opt->{-progress_pfx} ||= progress_pfx($src); my $pr = $opt->{-progress}; @@ -277,7 +286,7 @@ sub compact ($$) { defined(my $dfd = $opt->{$fd}) or next; $rdr->{$fd} = $dfd; } - $rdr->{1} = fileno($w) if $pr && pipe($r, $w); + $rdr->{1} = $w if $pr && pipe($r, $w); # we rely on --no-renumber to keep docids synched to NNTP my $cmd = [ $XAPIAN_COMPACT, '--no-renumber' ]; @@ -305,7 +314,7 @@ sub compact ($$) { } sub cpdb_loop ($$$;$$) { - my ($src, $dst, $pr_data, $cur_part, $new_parts) = @_; + my ($src, $dst, $pr_data, $cur_shard, $reshard) = @_; my ($pr, $fmt, $nr, $pfx); if ($pr_data) { $pr = $pr_data->{pr}; @@ -326,9 +335,9 @@ sub cpdb_loop ($$$;$$) { eval { for (; $it != $end; $it++) { my $docid = $it->get_docid; - if (defined $new_parts) { - my $dst_part = $docid % $new_parts; - next if $dst_part != $cur_part; + if (defined $reshard) { + my $dst_shard = $docid % $reshard; + next if $dst_shard != $cur_shard; } my $doc = $src->get_document($docid); $dst->replace_document($docid, $doc); @@ -349,44 +358,50 @@ sub cpdb_loop ($$$;$$) { # to the overhead of Perl. 
sub cpdb ($$) { my ($args, $opt) = @_; - my ($old, $new) = @$args; - my ($src, $cur_part); - my $new_parts; + my ($old, $newdir) = @$args; + my $new = $newdir->dirname; + my ($src, $cur_shard); + my $reshard; + PublicInbox::SearchIdx::load_xapian_writable() or die; + my $XapianDatabase = $PublicInbox::Search::X{Database}; if (ref($old) eq 'ARRAY') { - ($cur_part) = ($new =~ m!xap[0-9]+/([0-9]+)\b!); - defined $cur_part or + ($cur_shard) = ($new =~ m!xap[0-9]+/([0-9]+)\b!); + defined $cur_shard or die "BUG: could not extract shard # from $new"; - $new_parts = $opt->{reshard}; - defined $new_parts or die 'BUG: got array src w/o --reshard'; + $reshard = $opt->{reshard}; + defined $reshard or die 'BUG: got array src w/o --reshard'; - # repartitioning, M:N copy means have full read access + # resharding, M:N copy means have full read access foreach (@$old) { if ($src) { - my $sub = Search::Xapian::Database->new($_); + my $sub = $XapianDatabase->new($_); $src->add_database($sub); } else { - $src = Search::Xapian::Database->new($_); + $src = $XapianDatabase->new($_); } } } else { - $src = Search::Xapian::Database->new($old); + $src = $XapianDatabase->new($old); } - my ($xtmp, $tmp); + my ($tmp, $ft); + local %SIG = %SIG; if ($opt->{compact}) { - my $newdir = dirname($new); - same_fs_or_die($newdir, $new); - $tmp = tempdir("$new.compact-XXXXXX", DIR => $newdir); - $xtmp = PublicInbox::Xtmpdirs->new; - $xtmp->{$new} = $tmp; + my $dir = dirname($new); + same_fs_or_die($dir, $new); + $ft = File::Temp->newdir("$new.compact-XXXXXX", DIR => $dir); + setup_signals(); + $tmp = $ft->dirname; } else { $tmp = $new; } # like copydatabase(1), be sure we don't overwrite anything in case # of other bugs: - my $creat = Search::Xapian::DB_CREATE(); - my $dst = Search::Xapian::WritableDatabase->new($tmp, $creat); + my $creat = eval($PublicInbox::Search::Xap.'::DB_CREATE()'); + die if $@; + my $XapianWritableDatabase = $PublicInbox::Search::X{WritableDatabase}; + my $dst = $XapianWritableDatabase->new($tmp, $creat); my $pr = $opt->{-progress}; my $pfx = $opt->{-progress_pfx} = progress_pfx($new); my $pr_data = { pr => $pr, pfx => $pfx, nr => 0 } if $pr; @@ -397,7 +412,7 @@ sub cpdb ($$) { my $lc = $src->get_metadata('last_commit'); $dst->set_metadata('last_commit', $lc) if $lc; - # only the first xapian partition (0) gets 'indexlevel' + # only the first xapian shard (0) gets 'indexlevel' if ($new =~ m!(?:xapian[0-9]+|xap[0-9]+/0)\b!) { my $l = $src->get_metadata('indexlevel'); if ($l eq 'medium') { @@ -407,11 +422,11 @@ sub cpdb ($$) { if ($pr_data) { my $tot = $src->get_doccount; - # we can only estimate when repartitioning, + # we can only estimate when resharding, # because removed spam causes slight imbalance my $est = ''; - if (defined $cur_part && $new_parts > 1) { - $tot = int($tot/$new_parts); + if (defined $cur_shard && $reshard > 1) { + $tot = int($tot/$reshard); $est = 'around '; } my $fmt = "$pfx % ".length($tot)."u/$tot\n"; @@ -422,22 +437,22 @@ sub cpdb ($$) { }; } while (cpdb_retryable($src, $pfx)); - if (defined $new_parts) { + if (defined $reshard) { # we rely on document IDs matching NNTP article number, - # so we can't have the combined DB support rewriting + # so we can't have the Xapian sharding DB support rewriting # document IDs. Thus we iterate through each shard # individually. 
$src = undef; foreach (@$old) { - my $old = Search::Xapian::Database->new($_); - cpdb_loop($old, $dst, $pr_data, $cur_part, $new_parts); + my $old = $XapianDatabase->new($_); + cpdb_loop($old, $dst, $pr_data, $cur_shard, $reshard); } } else { cpdb_loop($src, $dst, $pr_data); } $pr->(sprintf($pr_data->{fmt}, $pr_data->{nr})) if $pr; - return unless $xtmp; + return unless $opt->{compact}; $src = $dst = undef; # flushes and closes @@ -445,46 +460,6 @@ sub cpdb ($$) { # since $dst isn't readable by HTTP or NNTP clients, yet: compact([ $tmp, $new ], $opt); remove_tree($tmp) or die "failed to remove $tmp: $!\n"; - $xtmp->done; -} - -# slightly easier-to-manage manage than END{} blocks -package PublicInbox::Xtmpdirs; -use strict; -use warnings; -use File::Path qw(remove_tree); -my %owner; - -sub new { - # http://www.tldp.org/LDP/abs/html/exitcodes.html - $SIG{INT} = sub { exit(130) }; - $SIG{HUP} = $SIG{PIPE} = $SIG{TERM} = sub { exit(1) }; - my $self = bless {}, $_[0]; # old partition => new (tmp) partition - $owner{"$self"} = $$; - $self; -} - -sub done { - my ($self) = @_; - delete $owner{"$self"}; - - my %known_pids; - $known_pids{$_}++ foreach values %owner; - if (!$known_pids{$$}) { - $SIG{INT} = $SIG{HUP} = $SIG{PIPE} = $SIG{TERM} = 'DEFAULT'; - } - %$self = (); -} - -sub DESTROY { - my ($self) = @_; - my $owner_pid = delete $owner{"$self"} or return; - return if $owner_pid != $$; - foreach my $new (values %$self) { - defined $new or next; # may be undef if repartitioning - remove_tree($new) unless -d "$new/old"; - } - done($self); } 1;
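
For readers unfamiliar with the temporary-directory change above: the patch drops the hand-rolled PublicInbox::Xtmpdirs package in favor of File::Temp->newdir (File::Temp 0.19+), and swaps exit(0) for POSIX::_exit(0) in forked children. The following is a minimal standalone sketch of that pattern, not code from the patch; the template name, DIR path, and the child's work are made up for illustration.

	#!/usr/bin/perl
	use strict;
	use warnings;
	use File::Temp 0.19 ();	# ->newdir
	use POSIX ();

	# newdir() returns an object whose DESTROY removes the directory,
	# so no hand-rolled END{}/DESTROY bookkeeping is needed.
	my $wip = File::Temp->newdir('xapian15-XXXXXXXX', DIR => '/tmp');
	my $path = $wip->dirname;	# path handed to the copy/compact work

	# exit() still runs destructors, so signal handlers that call exit()
	# let $wip clean up after itself:
	$SIG{INT} = sub { exit(130) };
	$SIG{HUP} = $SIG{PIPE} = $SIG{TERM} = sub { exit(1) };

	# forked children must NOT run those destructors, or they would
	# remove the parent's work-in-progress directory; POSIX::_exit(0)
	# skips END blocks and object destruction entirely:
	defined(my $pid = fork) or die "fork: $!";
	if ($pid == 0) {
		# ... child does its share of the work on $path ...
		POSIX::_exit(0);
	}
	waitpid($pid, 0);
	# $wip goes out of scope at program end and $path is removed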
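
The resharding logic in cpdb_loop keeps Xapian document IDs equal to NNTP article numbers: compaction runs with --no-renumber, and when resharding each document is routed to the shard numbered docid % reshard. A tiny sketch of that routing rule, with a hypothetical shard count and docid range:

	use strict;
	use warnings;

	# Destination shard is simply docid % shard_count; since docids are
	# never renumbered, NNTP article numbers stay valid after a reshard.
	my $reshard = 4;	# hypothetical new shard count
	for my $docid (1 .. 10) {
		my $dst_shard = $docid % $reshard;
		printf "docid %d => shard %d\n", $docid, $dst_shard;
	}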