X-Git-Url: http://www.git.stargrave.org/?a=blobdiff_plain;f=lib%2FPublicInbox%2FV2Writable.pm;h=c8334645d3cbcff5bf0b290bda75db7bd8113022;hb=d73d783ab2cf14ba28ca63723223d8c85a68cdd5;hp=9a58a7a94123eaee4f03d3f1870bab719d6d3237;hpb=95e35e1f546dfec0294380e958ae3b4f4598ce03;p=public-inbox.git diff --git a/lib/PublicInbox/V2Writable.pm b/lib/PublicInbox/V2Writable.pm index 9a58a7a9..c8334645 100644 --- a/lib/PublicInbox/V2Writable.pm +++ b/lib/PublicInbox/V2Writable.pm @@ -8,7 +8,6 @@ use strict; use v5.10.1; use parent qw(PublicInbox::Lock); use PublicInbox::SearchIdxShard; -use PublicInbox::IdxStack; use PublicInbox::Eml; use PublicInbox::Git; use PublicInbox::Import; @@ -18,9 +17,9 @@ use PublicInbox::InboxWritable; use PublicInbox::OverIdx; use PublicInbox::Msgmap; use PublicInbox::Spawn qw(spawn popen_rd); -use PublicInbox::SearchIdx qw(too_big log2stack crlf_adjust is_ancestor); +use PublicInbox::SearchIdx qw(log2stack crlf_adjust is_ancestor check_size); use IO::Handle; # ->autoflush -use File::Temp qw(tempfile); +use File::Temp (); my $OID = qr/[a-f0-9]{40,}/; # an estimate of the post-packed size to the raw uncompressed size @@ -35,14 +34,13 @@ my $PACKING_FACTOR = 0.4; our $NPROC_MAX_DEFAULT = 4; sub detect_nproc () { - for my $nproc (qw(nproc gnproc)) { # GNU coreutils nproc - `$nproc 2>/dev/null` =~ /^(\d+)$/ and return $1; - } - # getconf(1) is POSIX, but *NPROCESSORS* vars are not for (qw(_NPROCESSORS_ONLN NPROCESSORS_ONLN)) { `getconf $_ 2>/dev/null` =~ /^(\d+)$/ and return $1; } + for my $nproc (qw(nproc gnproc)) { # GNU coreutils nproc + `$nproc 2>/dev/null` =~ /^(\d+)$/ and return $1; + } # should we bother with `sysctl hw.ncpu`? Those only give # us total processor count, not online processor count. @@ -116,15 +114,14 @@ sub new { total_bytes => 0, current_info => '', xpfx => $xpfx, - over => PublicInbox::OverIdx->new("$xpfx/over.sqlite3"), + oidx => PublicInbox::OverIdx->new("$xpfx/over.sqlite3"), lock_path => "$dir/inbox.lock", # limit each git repo (epoch) to 1GB or so rotate_bytes => int((1024 * 1024 * 1024) / $PACKING_FACTOR), last_commit => [], # git epoch -> commit }; - $self->{over}->{-no_sync} = 1 if $v2ibx->{-no_sync}; + $self->{oidx}->{-no_fsync} = 1 if $v2ibx->{-no_fsync}; $self->{shards} = count_shards($self) || nproc_shards($creat); - $self->{index_max_size} = $v2ibx->{index_max_size}; bless $self, $class; } @@ -157,11 +154,11 @@ sub add { sub do_idx ($$$$) { my ($self, $msgref, $mime, $smsg) = @_; $smsg->{bytes} = $smsg->{raw_bytes} + crlf_adjust($$msgref); - $self->{over}->add_overview($mime, $smsg); + $self->{oidx}->add_overview($mime, $smsg); my $idx = idx_shard($self, $smsg->{num} % $self->{shards}); $idx->index_raw($msgref, $mime, $smsg); my $n = $self->{transact_bytes} += $smsg->{raw_bytes}; - $n >= ($PublicInbox::SearchIdx::BATCH_BYTES * $self->{shards}); + $n >= $self->{batch_bytes}; } sub _add { @@ -198,7 +195,7 @@ sub _add { sub v2_num_for { my ($self, $mime) = @_; - my $mids = mids($mime->header_obj); + my $mids = mids($mime); if (@$mids) { my $mid = $mids->[0]; my $num = $self->{mm}->mid_insert($mid); @@ -222,7 +219,7 @@ sub v2_num_for { if ($altid && grep(/:file=msgmap\.sqlite3\z/, @$altid)) { my $num = $self->{mm}->num_for($mid); - if (defined $num && !$self->{over}->get_art($num)) { + if (defined $num && !$self->{oidx}->get_art($num)) { return ($num, $mid); } } @@ -245,28 +242,27 @@ sub v2_num_for { } sub v2_num_for_harder { - my ($self, $mime) = @_; + my ($self, $eml) = @_; - my $hdr = $mime->header_obj; - my $dig = content_digest($mime); - my 
$mid0 = PublicInbox::Import::digest2mid($dig, $hdr); + my $dig = content_digest($eml); + my $mid0 = PublicInbox::Import::digest2mid($dig, $eml); my $num = $self->{mm}->mid_insert($mid0); unless (defined $num) { # it's hard to spoof the last Received: header - my @recvd = $hdr->header_raw('Received'); + my @recvd = $eml->header_raw('Received'); $dig->add("Received: $_") foreach (@recvd); - $mid0 = PublicInbox::Import::digest2mid($dig, $hdr); + $mid0 = PublicInbox::Import::digest2mid($dig, $eml); $num = $self->{mm}->mid_insert($mid0); # fall back to a random Message-ID and give up determinism: until (defined($num)) { $dig->add(rand); - $mid0 = PublicInbox::Import::digest2mid($dig, $hdr); + $mid0 = PublicInbox::Import::digest2mid($dig, $eml); warn "using random Message-ID <$mid0> as fallback\n"; $num = $self->{mm}->mid_insert($mid0); } } - PublicInbox::Import::append_mid($hdr, $mid0); + PublicInbox::Import::append_mid($eml, $mid0); ($num, $mid0); } @@ -278,11 +274,14 @@ sub idx_shard { sub _idx_init { # with_umask callback my ($self, $opt) = @_; $self->lock_acquire unless $opt && $opt->{-skip_lock}; - $self->{over}->create; + $self->{oidx}->create; # xcpdb can change shard count while -watch is idle my $nshards = count_shards($self); $self->{shards} = $nshards if $nshards && $nshards != $self->{shards}; + $self->{batch_bytes} = $opt->{batch_size} // + $PublicInbox::SearchIdx::BATCH_BYTES; + $self->{batch_bytes} *= $self->{shards} if $self->{parallel}; # need to create all shards before initializing msgmap FD # idx_shards must be visible to all forked processes @@ -294,7 +293,7 @@ sub _idx_init { # with_umask callback # for SQLite: my $mm = $self->{mm} = PublicInbox::Msgmap->new_file( "$self->{ibx}->{inboxdir}/msgmap.sqlite3", - $self->{ibx}->{-no_sync} ? 2 : 1); + $self->{ibx}->{-no_fsync} ? 2 : 1); $mm->{dbh}->begin_work; } @@ -307,8 +306,8 @@ sub idx_init { # do not leak read-only FDs to child processes, we only have these # FDs for duplicate detection so they should not be # frequently activated. - # delete @$ibx{qw(git mm search)}; - delete $ibx->{$_} foreach (qw(git mm search)); + delete @$ibx{qw(mm search)}; + $ibx->git->cleanup; $self->{parallel} = 0 if ($ibx->{indexlevel}//'') eq 'basic'; if ($self->{parallel}) { @@ -382,10 +381,10 @@ sub rewrite_internal ($$;$$$) { } else { $im = $self->importer; } - my $over = $self->{over}; + my $oidx = $self->{oidx}; my $chashes = content_hashes($old_eml); my $removed = []; - my $mids = mids($old_eml->header_obj); + my $mids = mids($old_eml); # We avoid introducing new blobs into git since the raw content # can be slightly different, so we do not need the user-supplied @@ -396,7 +395,7 @@ sub rewrite_internal ($$;$$$) { foreach my $mid (@$mids) { my %gone; # num => [ smsg, $mime, raw ] my ($id, $prev); - while (my $smsg = $over->next_by_mid($mid, \$id, \$prev)) { + while (my $smsg = $oidx->next_by_mid($mid, \$id, \$prev)) { my $msg = get_blob($self, $smsg); if (!defined($msg)) { warn "broken smsg for $mid\n"; @@ -515,9 +514,7 @@ sub _check_mids_match ($$$) { # Message-IDs are pretty complex and rethreading hasn't been fully # implemented, yet. 
sub check_mids_match ($$) { - my ($old_mime, $new_mime) = @_; - my $old = $old_mime->header_obj; - my $new = $new_mime->header_obj; + my ($old, $new) = @_; _check_mids_match(mids($old), mids($new), 'Message-ID(s)'); _check_mids_match(references($old), references($new), 'References/In-Reply-To'); @@ -626,19 +623,19 @@ sub checkpoint ($;$) { $dbh->commit; # SQLite overview is third - $self->{over}->commit_lazy; + $self->{oidx}->commit_lazy; # Now deal with Xapian if ($wait) { my $barrier = $self->barrier_init(scalar @$shards); # each shard needs to issue a barrier command - $_->remote_barrier for @$shards; + $_->shard_barrier for @$shards; # wait for each Xapian shard $self->barrier_wait($barrier); } else { - $_->remote_commit for @$shards; + $_->shard_commit for @$shards; } # last_commit is special, don't commit these until @@ -658,24 +655,41 @@ sub checkpoint ($;$) { # public sub barrier { checkpoint($_[0], 1) }; +# true if locked and active +sub active { !!$_[0]->{im} } + # public sub done { my ($self) = @_; - my $im = delete $self->{im}; - $im->done if $im; # PublicInbox::Import::done - checkpoint($self); - my $mm = delete $self->{mm}; - $mm->{dbh}->commit if $mm; + my $err = ''; + if (my $im = delete $self->{im}) { + eval { $im->done }; # PublicInbox::Import::done + $err .= "import done: $@\n" if $@; + } + if (!$err) { + eval { checkpoint($self) }; + $err .= "checkpoint: $@\n" if $@; + } + if (my $mm = delete $self->{mm}) { + my $m = $err ? 'rollback' : 'commit'; + eval { $mm->{dbh}->$m }; + $err .= "msgmap $m: $@\n" if $@; + } my $shards = delete $self->{idx_shards}; if ($shards) { - $_->remote_close for @$shards; + for (@$shards) { + eval { $_->shard_close }; + $err .= "shard close: $@\n" if $@; + } } - $self->{over}->disconnect; + eval { $self->{oidx}->dbh_close }; + $err .= "over close: $@\n" if $@; delete $self->{bnote}; my $nbytes = $self->{total_bytes}; $self->{total_bytes} = 0; $self->lock_release(!!$nbytes) if $shards; $self->{ibx}->git->cleanup; + die $err if $err; } sub fill_alternates ($$) { @@ -717,12 +731,14 @@ sub fill_alternates ($$) { } return unless $new; - my ($fh, $tmp) = tempfile('alt-XXXXXXXX', DIR => $info_dir); + my $fh = File::Temp->new(TEMPLATE => 'alt-XXXXXXXX', DIR => $info_dir); + my $tmp = $fh->filename; print $fh join("\n", sort { $alt{$b} <=> $alt{$a} } keys %alt), "\n" or die "print $tmp: $!\n"; chmod($mode, $fh) or die "fchmod $tmp: $!\n"; close $fh or die "close $tmp $!\n"; rename($tmp, $alt) or die "rename $tmp => $alt: $!\n"; + $fh->unlink_on_destroy(0); } sub git_init { @@ -803,18 +819,17 @@ sub import_init { sub diff ($$$) { my ($mid, $cur, $new) = @_; - my ($ah, $an) = tempfile('email-cur-XXXXXXXX', TMPDIR => 1); + my $ah = File::Temp->new(TEMPLATE => 'email-cur-XXXXXXXX', TMPDIR => 1); print $ah $cur->as_string or die "print: $!"; - close $ah or die "close: $!"; - my ($bh, $bn) = tempfile('email-new-XXXXXXXX', TMPDIR => 1); + $ah->flush or die "flush: $!"; PublicInbox::Import::drop_unwanted_headers($new); + my $bh = File::Temp->new(TEMPLATE => 'email-new-XXXXXXXX', TMPDIR => 1); print $bh $new->as_string or die "print: $!"; - close $bh or die "close: $!"; - my $cmd = [ qw(diff -u), $an, $bn ]; + $bh->flush or die "flush: $!"; + my $cmd = [ qw(diff -u), $ah->filename, $bh->filename ]; print STDERR "# MID conflict <$mid>\n"; my $pid = spawn($cmd, undef, { 1 => 2 }); waitpid($pid, 0) == $pid or die "diff did not finish"; - unlink($an, $bn); } sub get_blob ($$) { @@ -829,10 +844,10 @@ sub get_blob ($$) { sub content_exists ($$$) { my ($self, $mime, 
$mid) = @_; - my $over = $self->{over}; + my $oidx = $self->{oidx}; my $chashes = content_hashes($mime); my ($id, $prev); - while (my $smsg = $over->next_by_mid($mid, \$id, \$prev)) { + while (my $smsg = $oidx->next_by_mid($mid, \$id, \$prev)) { my $msg = get_blob($self, $smsg); if (!defined($msg)) { warn "broken smsg for $mid\n"; @@ -863,27 +878,29 @@ sub atfork_child { sub reindex_checkpoint ($$) { my ($self, $sync) = @_; - $sync->{mm_tmp}->atfork_prepare; + $self->{ibx}->git->cleanup; # *async_wait + ${$sync->{need_checkpoint}} = 0; + my $mm_tmp = $sync->{mm_tmp}; + $mm_tmp->atfork_prepare if $mm_tmp; $self->done; # release lock if (my $pr = $sync->{-opt}->{-progress}) { - $pr->(sprintf($sync->{-regen_fmt}, $sync->{nr})); + $pr->(sprintf($sync->{-regen_fmt}, ${$sync->{nr}})); } # allow -watch or -mda to write... - $self->idx_init; # reacquire lock - $sync->{mm_tmp}->atfork_parent; + $self->idx_init($sync->{-opt}); # reacquire lock + $mm_tmp->atfork_parent if $mm_tmp; } -sub reindex_oid ($$$) { - my ($self, $sync, $oid) = @_; - return if too_big($self, $oid); - my ($num, $mid0, $len); - my $msgref = $self->{ibx}->git->cat_file($oid, \$len); - return if $len == 0; # purged - my $mime = PublicInbox::Eml->new($$msgref); - my $mids = mids($mime->header_obj); - my $chash = content_hash($mime); +sub index_oid { # cat_async callback + my ($bref, $oid, $type, $size, $arg) = @_; + return if $size == 0; # purged + my ($num, $mid0); + my $eml = PublicInbox::Eml->new($$bref); + my $mids = mids($eml); + my $chash = content_hash($eml); + my $self = $arg->{v2w}; if (scalar(@$mids) == 0) { warn "E: $oid has no Message-ID, skipping\n"; @@ -891,28 +908,28 @@ sub reindex_oid ($$$) { } # {unindexed} is unlikely - if ((my $unindexed = $self->{unindexed}) && scalar(@$mids) == 1) { + if ((my $unindexed = $arg->{unindexed}) && scalar(@$mids) == 1) { $num = delete($unindexed->{$mids->[0]}); if (defined $num) { $mid0 = $mids->[0]; $self->{mm}->mid_set($num, $mid0); - delete($self->{unindexed}) if !keys(%$unindexed); + delete($arg->{unindexed}) if !keys(%$unindexed); } } if (!defined($num)) { # reuse if reindexing (or duplicates) - my $over = $self->{over}; + my $oidx = $self->{oidx}; for my $mid (@$mids) { - ($num, $mid0) = $over->num_mid0_for_oid($oid, $mid); + ($num, $mid0) = $oidx->num_mid0_for_oid($oid, $mid); last if defined $num; } } $mid0 //= do { # is this a number we got before? - $num = $sync->{mm_tmp}->num_for($mids->[0]); + $num = $arg->{mm_tmp}->num_for($mids->[0]); defined($num) ? 
$mids->[0] : undef; }; if (!defined($num)) { for (my $i = $#$mids; $i >= 1; $i--) { - $num = $sync->{mm_tmp}->num_for($mids->[$i]); + $num = $arg->{mm_tmp}->num_for($mids->[$i]); if (defined($num)) { $mid0 = $mids->[$i]; last; @@ -920,7 +937,7 @@ sub reindex_oid ($$$) { } } if (defined($num)) { - $sync->{mm_tmp}->num_delete($num); + $arg->{mm_tmp}->num_delete($num); } else { # never seen $num = $self->{mm}->mid_insert($mids->[0]); if (defined($num)) { @@ -939,16 +956,16 @@ sub reindex_oid ($$$) { warn "E: $oid <", join('> <', @$mids), "> is a duplicate\n"; return; } - $sync->{nr}++; + ++${$arg->{nr}}; my $smsg = bless { - raw_bytes => $len, + raw_bytes => $size, num => $num, blob => $oid, mid => $mid0, }, 'PublicInbox::Smsg'; - $smsg->populate($mime, $sync); - if (do_idx($self, $msgref, $mime, $smsg)) { - reindex_checkpoint($self, $sync); + $smsg->populate($eml, $arg); + if (do_idx($self, $bref, $eml, $smsg)) { + ${$arg->{need_checkpoint}} = 1; } } @@ -981,7 +998,7 @@ sub log_range ($$$$$) { my $opt = $sync->{-opt}; my $pr = $opt->{-progress} if (($opt->{verbose} || 0) > 1); my $cur = $sync->{ranges}->[$i] or do { - $pr->("$i.git indexing all of $tip") if $pr; + $pr->("$i.git indexing all of $tip\n") if $pr; return $tip; # all of it }; @@ -1065,44 +1082,50 @@ sub sync_prepare ($$$) { # our code and blindly injects "d" file history into git repos if (my @leftovers = keys %{delete($sync->{D}) // {}}) { warn('W: unindexing '.scalar(@leftovers)." leftovers\n"); + my $arg = { v2w => $self }; + my $all = $self->{ibx}->git; for my $oid (@leftovers) { $oid = unpack('H*', $oid); $self->{current_info} = "leftover $oid"; - unindex_oid($self, $oid); + $all->cat_async($oid, \&unindex_oid, $arg); } + $all->cat_async_wait; + } + if (!$regen_max && !keys(%{$self->{unindex_range}})) { + $sync->{-regen_fmt} = "%u/?\n"; + return 0; } - return 0 if (!$regen_max && !keys(%{$self->{unindex_range}})); # reindex should NOT see new commits anymore, if we do, # it's a problem and we need to notice it via die() my $pad = length($regen_max) + 1; $sync->{-regen_fmt} = "% ${pad}u/$regen_max\n"; - $sync->{nr} = 0; + $sync->{nr} = \(my $nr = 0); return -1 if $sync->{reindex}; $regen_max + $self->{mm}->num_highwater() || 0; } sub unindex_oid_remote ($$$) { my ($self, $oid, $mid) = @_; - my @removed = $self->{over}->remove_oid($oid, $mid); + my @removed = $self->{oidx}->remove_oid($oid, $mid); for my $num (@removed) { my $idx = idx_shard($self, $num % $self->{shards}); - $idx->remote_remove($oid, $num); + $idx->shard_remove($oid, $num); } } -sub unindex_oid ($$;$) { - my ($self, $oid, $unindexed) = @_; +sub unindex_oid ($$;$) { # git->cat_async callback + my ($bref, $oid, $type, $size, $sync) = @_; + my $self = $sync->{v2w}; + my $unindexed = $sync->{in_unindex} ? 
$sync->{unindexed} : undef; my $mm = $self->{mm}; - my $msgref = $self->{ibx}->git->cat_file($oid); - my $mime = PublicInbox::Eml->new($msgref); - my $mids = mids($mime->header_obj); - $mime = $msgref = undef; - my $over = $self->{over}; + my $mids = mids(PublicInbox::Eml->new($bref)); + undef $$bref; + my $oidx = $self->{oidx}; foreach my $mid (@$mids) { my %gone; my ($id, $prev); - while (my $smsg = $over->next_by_mid($mid, \$id, \$prev)) { + while (my $smsg = $oidx->next_by_mid($mid, \$id, \$prev)) { $gone{$smsg->{num}} = 1 if $oid eq $smsg->{blob}; } my $n = scalar(keys(%gone)) or next; @@ -1125,17 +1148,20 @@ sub unindex_oid ($$;$) { # a mirror because the source used -purge or -edit sub unindex ($$$$) { my ($self, $sync, $git, $unindex_range) = @_; - my $unindexed = $self->{unindexed} ||= {}; # $mid0 => $num + my $unindexed = $sync->{unindexed} //= {}; # $mid0 => $num my $before = scalar keys %$unindexed; # order does not matter, here: my @cmd = qw(log --raw -r --no-notes --no-color --no-abbrev --no-renames); my $fh = $git->popen(@cmd, $unindex_range); + my $all = $self->{ibx}->git; + local $sync->{in_unindex} = 1; while (<$fh>) { /\A:\d{6} 100644 $OID ($OID) [AM]\tm$/o or next; - unindex_oid($self, $1, $unindexed); + $all->cat_async($1, \&unindex_oid, $sync); } close $fh or die "git log failed: \$?=$?"; + $all->cat_async_wait; return unless $sync->{-opt}->{prune}; my $after = scalar keys %$unindexed; @@ -1160,6 +1186,37 @@ sub sync_ranges ($$$) { $ranges; } +sub index_xap_only { # git->cat_async callback + my ($bref, $oid, $type, $size, $smsg) = @_; + my $self = $smsg->{v2w}; + my $idx = idx_shard($self, $smsg->{num} % $self->{shards}); + $smsg->{raw_bytes} = $size; + $idx->index_raw($bref, undef, $smsg); + $self->{transact_bytes} += $size; +} + +sub index_xap_step ($$$;$) { + my ($self, $sync, $beg, $step) = @_; + my $end = $sync->{art_end}; + return if $beg > $end; # nothing to do + + $step //= $self->{shards}; + my $ibx = $self->{ibx}; + if (my $pr = $sync->{-opt}->{-progress}) { + $pr->("Xapian indexlevel=$ibx->{indexlevel} ". 
+ "$beg..$end (% $step)\n"); + } + for (my $num = $beg; $num <= $end; $num += $step) { + my $smsg = $ibx->over->get_art($num) or next; + $smsg->{v2w} = $self; + $ibx->git->cat_async($smsg->{blob}, \&index_xap_only, $smsg); + if ($self->{transact_bytes} >= $self->{batch_bytes}) { + ${$sync->{nr}} = $num; + reindex_checkpoint($self, $sync); + } + } +} + sub index_epoch ($$$) { my ($self, $sync, $i) = @_; @@ -1171,35 +1228,84 @@ sub index_epoch ($$$) { } defined(my $stk = $sync->{stacks}->[$i]) or return; $sync->{stacks}->[$i] = undef; + my $all = $self->{ibx}->git; while (my ($f, $at, $ct, $oid) = $stk->pop_rec) { $self->{current_info} = "$i.git $oid"; if ($f eq 'm') { - $sync->{autime} = $at; - $sync->{cotime} = $ct; - reindex_oid($self, $sync, $oid); + my $arg = { %$sync, autime => $at, cotime => $ct }; + if ($sync->{max_size}) { + $all->check_async($oid, \&check_size, $arg); + } else { + $all->cat_async($oid, \&index_oid, $arg); + } } elsif ($f eq 'd') { - unindex_oid($self, $oid); + $all->cat_async($oid, \&unindex_oid, $sync); + } + if (${$sync->{need_checkpoint}}) { + reindex_checkpoint($self, $sync); } } - delete @$sync{qw(autime cotime)}; + $all->check_async_wait; + $all->cat_async_wait; update_last_commit($self, $git, $i, $stk->{latest_cmt}); } +sub xapian_only { + my ($self, $opt, $sync, $art_beg) = @_; + my $seq = $opt->{sequential_shard}; + $art_beg //= 0; + local $self->{parallel} = 0 if $seq; + $self->idx_init($opt); # acquire lock + if (my $art_end = $self->{ibx}->mm->max) { + $sync //= { + need_checkpoint => \(my $bool = 0), + -opt => $opt, + v2w => $self, + nr => \(my $nr = 0), + -regen_fmt => "%u/?\n", + }; + $sync->{art_end} = $art_end; + if ($seq || !$self->{parallel}) { + my $shard_end = $self->{shards} - 1; + for my $i (0..$shard_end) { + index_xap_step($self, $sync, $art_beg + $i); + if ($i != $shard_end) { + reindex_checkpoint($self, $sync); + } + } + } else { # parallel (maybe) + index_xap_step($self, $sync, $art_beg, 1); + } + } + $self->{ibx}->git->cat_async_wait; + $self->done; +} + # public, called by public-inbox-index sub index_sync { my ($self, $opt) = @_; - $opt ||= {}; + $opt //= $_[1] //= {}; + goto \&xapian_only if $opt->{xapian_only}; + my $pr = $opt->{-progress}; my $epoch_max; my $latest = git_dir_latest($self, \$epoch_max); return unless defined $latest; + + my $seq = $opt->{sequential_shard}; + my $art_beg; # the NNTP article number we start xapian_only at + my $idxlevel = $self->{ibx}->{indexlevel}; + local $self->{ibx}->{indexlevel} = 'basic' if $seq; + $self->idx_init($opt); # acquire lock fill_alternates($self, $epoch_max); - $self->{over}->rethread_prepare($opt); + $self->{oidx}->rethread_prepare($opt); my $sync = { + need_checkpoint => \(my $bool = 0), unindex_range => {}, # EPOCH => oid_old..oid_new reindex => $opt->{reindex}, - -opt => $opt + -opt => $opt, + v2w => $self, }; $sync->{ranges} = sync_ranges($self, $sync, $epoch_max); if (sync_prepare($self, $sync, $epoch_max)) { @@ -1209,18 +1315,45 @@ sub index_sync { # only for batch performance. 
$self->{mm}->{dbh}->rollback; $self->{mm}->{dbh}->begin_work; - $sync->{mm_tmp} = $self->{mm}->tmp_clone; - } + $sync->{mm_tmp} = + $self->{mm}->tmp_clone($self->{ibx}->{inboxdir}); + # xapian_only works incrementally w/o --reindex + if ($seq && !$opt->{reindex}) { + $art_beg = $sync->{mm_tmp}->max; + $art_beg++ if defined($art_beg); + } + } + if ($sync->{max_size} = $opt->{max_size}) { + $sync->{index_oid} = \&index_oid; + } # work forwards through history index_epoch($self, $sync, $_) for (0..$epoch_max); + $self->{oidx}->rethread_done($opt); $self->done; if (my $nr = $sync->{nr}) { my $pr = $sync->{-opt}->{-progress}; - $pr->('all.git '.sprintf($sync->{-regen_fmt}, $nr)) if $pr; + $pr->('all.git '.sprintf($sync->{-regen_fmt}, $$nr)) if $pr; + } + + # deal with Xapian shards sequentially + if ($seq && delete($sync->{mm_tmp})) { + $self->{ibx}->{indexlevel} = $idxlevel; + xapian_only($self, $opt, $sync, $art_beg); + } + + # --reindex on the command-line + if ($opt->{reindex} && !ref($opt->{reindex}) && $idxlevel ne 'basic') { + $self->lock_acquire; + my $s0 = PublicInbox::SearchIdx->new($self->{ibx}, 0, 0); + if (my $xdb = $s0->idx_acquire) { + my $n = $xdb->get_metadata('has_threadid'); + $xdb->set_metadata('has_threadid', '1') if $n ne '1'; + } + $s0->idx_release; + $self->lock_release; } - $self->{over}->rethread_done($opt); # reindex does not pick up new changes, so we rerun w/o it: if ($opt->{reindex}) {
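
The hunks above replace synchronous $git->cat_file() calls with PublicInbox::Git's asynchronous interface: callers queue an OID together with a callback and an argument via cat_async(), and cat_async_wait() drains the pipeline, invoking each callback with the blob contents and metadata. What follows is a minimal sketch of that convention, not part of the patch; the callback signature and the cat_async()/cat_async_wait() calls mirror the code above, while the git directory, OID, and state hash are placeholders.

  # sketch only; placeholder values are marked below
  use strict;
  use v5.10.1;
  use PublicInbox::Git;

  my $git = PublicInbox::Git->new('/path/to/all.git'); # placeholder git dir
  my $blob_oid = '0' x 40;                             # placeholder OID
  my $state = { v2w => undef };                        # per-request argument

  my $cb = sub {
          my ($bref, $oid, $type, $size, $arg) = @_;
          return if $size == 0; # e.g. a purged blob
          # $$bref holds the raw object; an indexer would parse it here
          warn "fetched $oid ($type, $size bytes)\n";
  };

  $git->cat_async($blob_oid, $cb, $state); # queue the request
  $git->cat_async_wait;                    # block until every callback has run

Queuing many cat_async() requests before a single cat_async_wait() is presumably what lets index_epoch() and unindex() keep one git-cat-file process busy instead of paying a round trip per message, which is the point of turning reindex_oid() and unindex_oid() into callbacks (index_oid, unindex_oid, index_xap_only) in this change.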