X-Git-Url: http://www.git.stargrave.org/?a=blobdiff_plain;f=lib%2FPublicInbox%2FV2Writable.pm;h=724fa804356265c4449898f47737351cf624d088;hb=af0b0fb7a454470a32c452119d0392e0dedb3fe1;hp=efda79074de1fd563c5cd81e086a45e199f7d5f5;hpb=fe0da19e2917a47d7366a8c67e39a035f6018fd6;p=public-inbox.git diff --git a/lib/PublicInbox/V2Writable.pm b/lib/PublicInbox/V2Writable.pm index efda7907..724fa804 100644 --- a/lib/PublicInbox/V2Writable.pm +++ b/lib/PublicInbox/V2Writable.pm @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2020 all contributors +# Copyright (C) 2018-2021 all contributors # License: AGPL-3.0+ # This interface wraps and mimics PublicInbox::Import @@ -16,14 +16,15 @@ use PublicInbox::ContentHash qw(content_hash content_digest); use PublicInbox::InboxWritable; use PublicInbox::OverIdx; use PublicInbox::Msgmap; -use PublicInbox::Spawn qw(spawn popen_rd); -use PublicInbox::SearchIdx qw(log2stack crlf_adjust is_ancestor check_size); +use PublicInbox::Spawn qw(spawn popen_rd run_die); +use PublicInbox::SearchIdx qw(log2stack crlf_adjust is_ancestor check_size + is_bad_blob); use IO::Handle; # ->autoflush use File::Temp (); my $OID = qr/[a-f0-9]{40,}/; # an estimate of the post-packed size to the raw uncompressed size -my $PACKING_FACTOR = 0.4; +our $PACKING_FACTOR = 0.4; # SATA storage lags behind what CPUs are capable of, so relying on # nproc(1) can be misleading and having extra Xapian shards is a @@ -65,13 +66,15 @@ sub nproc_shards ($) { sub count_shards ($) { my ($self) = @_; - $self->{ibx} ? do { + if (my $ibx = $self->{ibx}) { # always load existing shards in case core count changes: # Also, shard count may change while -watch is running - my $srch = $self->{ibx}->search or return 0; - delete $self->{ibx}->{search}; + my $srch = $ibx->search or return 0; + delete $ibx->{search}; $srch->{nshard} // 0 - } : $self->{nshard}; # self->{nshard} is for ExtSearchIdx + } else { # ExtSearchIdx + $self->{nshard} ||= scalar($self->xdb_shards_flat); + } } sub new { @@ -88,8 +91,6 @@ sub new { die "$dir does not exist\n"; } } - $v2ibx->umask_prepare; - my $xpfx = "$dir/xap" . PublicInbox::Search::SCHEMA_VERSION; my $self = { ibx => $v2ibx, @@ -119,12 +120,9 @@ sub init_inbox { } $self->idx_init; $self->{mm}->skip_artnum($skip_artnum) if defined $skip_artnum; - my $epoch_max = -1; - $self->{ibx}->git_dir_latest(\$epoch_max); - if (defined $skip_epoch && $epoch_max == -1) { - $epoch_max = $skip_epoch; - } - $self->git_init($epoch_max >= 0 ? 
$epoch_max : 0); + my $max = $self->{ibx}->max_git_epoch; + $max = $skip_epoch if (defined($skip_epoch) && !defined($max)); + $self->git_init($max // 0); $self->done; } @@ -266,13 +264,17 @@ sub _idx_init { # with_umask callback $self->{shards} = $nshards if $nshards && $nshards != $self->{shards}; $self->{batch_bytes} = $opt->{batch_size} // $PublicInbox::SearchIdx::BATCH_BYTES; - $self->{batch_bytes} *= $self->{shards} if $self->{parallel}; # need to create all shards before initializing msgmap FD # idx_shards must be visible to all forked processes my $max = $self->{shards} - 1; my $idx = $self->{idx_shards} = []; push @$idx, PublicInbox::SearchIdxShard->new($self, $_) for (0..$max); + + # SearchIdxShard may do their own flushing, so don't scale + # until after forking + $self->{batch_bytes} *= $self->{shards} if $self->{parallel}; + my $ibx = $self->{ibx} or return; # ExtIdxSearch # Now that all subprocesses are up, we can open the FDs @@ -310,7 +312,6 @@ sub idx_init { $ibx->git->cleanup; parallel_init($self, $ibx->{indexlevel}); - $ibx->umask_prepare; $ibx->with_umask(\&_idx_init, $self, $opt); } @@ -323,12 +324,7 @@ sub _replace_oids ($$$) { my $ibx = $self->{ibx}; my $pfx = "$ibx->{inboxdir}/git"; my $rewrites = []; # epoch => commit - my $max = $self->{epoch_max}; - - unless (defined($max)) { - defined(my $latest = $ibx->git_dir_latest(\$max)) or return; - $self->{epoch_max} = $max; - } + my $max = $self->{epoch_max} //= $ibx->max_git_epoch // return; foreach my $i (0..$max) { my $git_dir = "$pfx/$i.git"; @@ -567,7 +563,7 @@ sub last_epoch_commit ($$;$) { $self->{mm}->last_commit_xap($v, $i, $cmt); } -sub set_last_commits ($) { +sub set_last_commits ($) { # this is NOT for ExtSearchIdx my ($self) = @_; defined(my $epoch_max = $self->{epoch_max}) or return; my $last_commit = $self->{last_commit}; @@ -620,17 +616,20 @@ sub checkpoint ($;$) { # Now deal with Xapian if ($wait) { - my $barrier = $self->barrier_init(scalar @$shards); + my $barrier = barrier_init($self, scalar @$shards); # each shard needs to issue a barrier command $_->shard_barrier for @$shards; # wait for each Xapian shard - $self->barrier_wait($barrier); + barrier_wait($self, $barrier); } else { $_->shard_commit for @$shards; } + my $midx = $self->{midx}; # misc index + $midx->commit_txn if $midx; + # last_commit is special, don't commit these until # Xapian shards are done: $dbh->begin_work if $dbh; @@ -639,6 +638,7 @@ sub checkpoint ($;$) { $dbh->commit; $dbh->begin_work; } + $midx->begin_txn if $midx; } $self->{total_bytes} += $self->{transact_bytes}; $self->{transact_bytes} = 0; @@ -678,6 +678,7 @@ sub done { } eval { $self->{oidx}->dbh_close }; $err .= "over close: $@\n" if $@; + delete $self->{midx}; delete $self->{bnote}; my $nbytes = $self->{total_bytes}; $self->{total_bytes} = 0; @@ -744,9 +745,8 @@ sub git_init { my ($self, $epoch) = @_; my $git_dir = "$self->{ibx}->{inboxdir}/git/$epoch.git"; PublicInbox::Import::init_bare($git_dir); - my @cmd = (qw/git config/, "--file=$git_dir/config", - 'include.path', '../../all.git/config'); - PublicInbox::Import::run_die(\@cmd); + run_die([qw(git config), "--file=$git_dir/config", + qw(include.path ../../all.git/config)]); fill_alternates($self, $epoch); $git_dir } @@ -860,29 +860,50 @@ sub atfork_child { sub reindex_checkpoint ($$) { my ($self, $sync) = @_; - $self->git->cleanup; # *async_wait + $self->git->async_wait_all; + $self->update_last_commit($sync); ${$sync->{need_checkpoint}} = 0; my $mm_tmp = $sync->{mm_tmp}; $mm_tmp->atfork_prepare if $mm_tmp; - 
$self->done; # release lock + die 'BUG: {im} during reindex' if $self->{im}; + if ($self->{ibx_map} && !$sync->{checkpoint_unlocks}) { + checkpoint($self, 1); # no need to release lock on pure index + } else { + $self->done; # release lock + } - if (my $pr = $sync->{-opt}->{-progress}) { + if (my $pr = $sync->{-regen_fmt} ? $sync->{-opt}->{-progress} : undef) { $pr->(sprintf($sync->{-regen_fmt}, ${$sync->{nr}})); } # allow -watch or -mda to write... $self->idx_init($sync->{-opt}); # reacquire lock + if (my $intvl = $sync->{check_intvl}) { # eidx + $sync->{next_check} = PublicInbox::DS::now() + $intvl; + } $mm_tmp->atfork_parent if $mm_tmp; } +sub index_finalize ($$) { + my ($arg, $index) = @_; + ++$arg->{self}->{nidx}; + if (defined(my $cur = $arg->{cur_cmt})) { + ${$arg->{latest_cmt}} = $cur; + } elsif ($index) { + die 'BUG: {cur_cmt} missing'; + } # else { unindexing @leftovers doesn't set {cur_cmt} +} + sub index_oid { # cat_async callback my ($bref, $oid, $type, $size, $arg) = @_; - return if $size == 0; # purged + is_bad_blob($oid, $type, $size, $arg->{oid}) and + return index_finalize($arg, 1); # size == 0 purged returns here + my $self = $arg->{self}; + local $self->{current_info} = "$self->{current_info} $oid"; my ($num, $mid0); my $eml = PublicInbox::Eml->new($$bref); my $mids = mids($eml); my $chash = content_hash($eml); - my $self = $arg->{self}; if (scalar(@$mids) == 0) { warn "E: $oid has no Message-ID, skipping\n"; @@ -949,19 +970,22 @@ sub index_oid { # cat_async callback if (do_idx($self, $bref, $eml, $smsg)) { ${$arg->{need_checkpoint}} = 1; } + index_finalize($arg, 1); } # only update last_commit for $i on reindex iff newer than current -# $sync will be used by subclasses sub update_last_commit { - my ($self, $sync, $unit, $cmt) = @_; + my ($self, $sync, $stk) = @_; + my $unit = $sync->{unit} // return; + my $latest_cmt = $stk ? $stk->{latest_cmt} : ${$sync->{latest_cmt}}; + defined($latest_cmt) or return; my $last = last_epoch_commit($self, $unit->{epoch}); - if (defined $last && is_ancestor($unit->{git}, $last, $cmt)) { - my @cmd = (qw(rev-list --count), "$last..$cmt"); + if (defined $last && is_ancestor($self->git, $last, $latest_cmt)) { + my @cmd = (qw(rev-list --count), "$last..$latest_cmt"); chomp(my $n = $unit->{git}->qx(@cmd)); return if $n ne '' && $n == 0; } - last_epoch_commit($self, $unit->{epoch}, $cmt); + last_epoch_commit($self, $unit->{epoch}, $latest_cmt); } sub last_commits { @@ -992,9 +1016,10 @@ sub log_range ($$$) { my $range = "$cur..$tip"; $pr->("$i.git checking contiguity... ") if $pr; - if (is_ancestor($unit->{git}, $cur, $tip)) { # common case + my $git = $unit->{git}; + if (is_ancestor($sync->{self}->git, $cur, $tip)) { # common case $pr->("OK\n") if $pr; - my $n = $unit->{git}->qx(qw(rev-list --count), $range); + my $n = $git->qx(qw(rev-list --count), $range); chomp($n); if ($n == 0) { $sync->{ranges}->[$i] = undef; @@ -1006,9 +1031,9 @@ sub log_range ($$$) { $pr->("FAIL\n") if $pr; warn <<""; discontiguous range: $range -Rewritten history? (in $unit->{git}->{git_dir}) +Rewritten history? (in $git->{git_dir}) - chomp(my $base = $unit->{git}->qx('merge-base', $tip, $cur)); + chomp(my $base = $git->qx('merge-base', $tip, $cur)); if ($base) { $range = "$base..$tip"; warn "found merge-base: $base\n" @@ -1017,10 +1042,17 @@ Rewritten history? 
(in $unit->{git}->{git_dir}) warn "discarding history at $cur\n"; } warn <<""; -reindexing $unit->{git}->{git_dir} starting at -$range - - $unit->{unindex_range} = "$base..$cur"; +reindexing $git->{git_dir} +starting at $range + + # $cur^0 may no longer exist if pruned by git + if ($git->qx(qw(rev-parse -q --verify), "$cur^0")) { + $unit->{unindex_range} = "$base..$cur"; + } elsif ($base && $git->qx(qw(rev-parse -q --verify), $base)) { + $unit->{unindex_range} = "$base.."; + } else { + warn "W: unable to unindex before $range\n"; + } } $range; } @@ -1034,54 +1066,78 @@ sub sync_prepare ($$) { my $pr = $sync->{-opt}->{-progress}; my $regen_max = 0; my $head = $sync->{ibx}->{ref_head} || 'HEAD'; - - # reindex stops at the current heads and we later rerun index_sync - # without {reindex} - my $reindex_heads = $self->last_commits($sync) if $sync->{reindex}; - + my $pfx; + if ($pr) { + ($pfx) = ($sync->{ibx}->{inboxdir} =~ m!([^/]+)\z!g); + $pfx //= $sync->{ibx}->{inboxdir}; + } + + my $reindex_heads; + if ($self->{ibx_map}) { + # ExtSearchIdx won't index messages unless they're in + # over.sqlite3 for a given inbox, so don't read beyond + # what's in the per-inbox index. + $reindex_heads = []; + my $v = PublicInbox::Search::SCHEMA_VERSION; + my $mm = $sync->{ibx}->mm; + for my $i (0..$sync->{epoch_max}) { + $reindex_heads->[$i] = $mm->last_commit_xap($v, $i); + } + } elsif ($sync->{reindex}) { # V2 inbox + # reindex stops at the current heads and we later + # rerun index_sync without {reindex} + $reindex_heads = $self->last_commits($sync); + } if ($sync->{max_size} = $sync->{-opt}->{max_size}) { $sync->{index_oid} = $self->can('index_oid'); } + my $git_pfx = "$sync->{ibx}->{inboxdir}/git"; for (my $i = $sync->{epoch_max}; $i >= 0; $i--) { - my $git_dir = $sync->{ibx}->git_dir_n($i); + my $git_dir = "$git_pfx/$i.git"; -d $git_dir or next; # missing epochs are fine my $git = PublicInbox::Git->new($git_dir); my $unit = { git => $git, epoch => $i }; + my $tip; if ($reindex_heads) { - $head = $reindex_heads->[$i] or next; + $tip = $head = $reindex_heads->[$i] or next; + } else { + $tip = $git->qx(qw(rev-parse -q --verify), $head); + next if $?; # new repo + chomp $tip; } - chomp(my $tip = $git->qx(qw(rev-parse -q --verify), $head)); - next if $?; # new repo - my $range = log_range($sync, $unit, $tip) or next; # can't use 'rev-list --count' if we use --diff-filter - $pr->("$i.git counting $range ... ") if $pr; + $pr->("$pfx $i.git counting $range ... ") if $pr; # Don't bump num_highwater on --reindex by using {D}. # We intentionally do NOT use {D} in the non-reindex case # because we want NNTP article number gaps from unindexed # messages to show up in mirrors, too. $sync->{D} //= $sync->{reindex} ? {} : undef; # OID_BIN => NR my $stk = log2stack($sync, $git, $range); + return 0 if $sync->{quit}; my $nr = $stk ? $stk->num_records : 0; $pr->("$nr\n") if $pr; $unit->{stack} = $stk; # may be undef unshift @{$sync->{todo}}, $unit; $regen_max += $nr; } + return 0 if $sync->{quit}; # XXX this should not happen unless somebody bypasses checks in # our code and blindly injects "d" file history into git repos if (my @leftovers = keys %{delete($sync->{D}) // {}}) { warn('W: unindexing '.scalar(@leftovers)." 
leftovers\n"); + local $self->{current_info} = 'leftover '; my $unindex_oid = $self->can('unindex_oid'); for my $oid (@leftovers) { + last if $sync->{quit}; $oid = unpack('H*', $oid); - $self->{current_info} = "leftover $oid"; my $req = { %$sync, oid => $oid }; $self->git->cat_async($oid, $unindex_oid, $req); } $self->git->cat_async_wait; } + return 0 if $sync->{quit}; if (!$regen_max) { $sync->{-regen_fmt} = "%u/?\n"; return 0; @@ -1101,14 +1157,17 @@ sub unindex_oid_aux ($$$) { my @removed = $self->{oidx}->remove_oid($oid, $mid); for my $num (@removed) { my $idx = idx_shard($self, $num); - $idx->shard_remove($oid, $num); + $idx->shard_remove($num); } } sub unindex_oid ($$;$) { # git->cat_async callback - my ($bref, $oid, $type, $size, $sync) = @_; - my $self = $sync->{self}; - my $unindexed = $sync->{in_unindex} ? $sync->{unindexed} : undef; + my ($bref, $oid, $type, $size, $arg) = @_; + is_bad_blob($oid, $type, $size, $arg->{oid}) and + return index_finalize($arg, 0); + my $self = $arg->{self}; + local $self->{current_info} = "$self->{current_info} $oid"; + my $unindexed = $arg->{in_unindex} ? $arg->{unindexed} : undef; my $mm = $self->{mm}; my $mids = mids(PublicInbox::Eml->new($bref)); undef $$bref; @@ -1133,6 +1192,7 @@ sub unindex_oid ($$;$) { # git->cat_async callback } unindex_oid_aux($self, $oid, $mid); } + index_finalize($arg, 0); } sub git { $_[0]->{ibx}->git } @@ -1161,8 +1221,7 @@ sub unindex_todo ($$$) { return if $before == $after; # ensure any blob can not longer be accessed via dumb HTTP - PublicInbox::Import::run_die(['git', - "--git-dir=$unit->{git}->{git_dir}", + run_die(['git', "--git-dir=$unit->{git}->{git_dir}", qw(-c gc.reflogExpire=now gc --prune=all --quiet)]); } @@ -1200,6 +1259,7 @@ sub index_xap_step ($$$;$) { "$beg..$end (% $step)\n"); } for (my $num = $beg; $num <= $end; $num += $step) { + last if $sync->{quit}; my $smsg = $ibx->over->get_art($num) or next; $smsg->{self} = $self; $ibx->git->cat_async($smsg->{blob}, \&index_xap_only, $smsg); @@ -1212,16 +1272,36 @@ sub index_xap_step ($$$;$) { sub index_todo ($$$) { my ($self, $sync, $unit) = @_; + return if $sync->{quit}; unindex_todo($self, $sync, $unit); my $stk = delete($unit->{stack}) or return; my $all = $self->git; my $index_oid = $self->can('index_oid'); my $unindex_oid = $self->can('unindex_oid'); - my ($pfx) = ($unit->{git}->{git_dir} =~ m!/([^/]+)\z!g); - $pfx //= $unit->{git}->{git_dir}; - while (my ($f, $at, $ct, $oid) = $stk->pop_rec) { - $self->{current_info} = "$pfx $oid"; - my $req = { %$sync, autime => $at, cotime => $ct, oid => $oid }; + my $pfx; + if ($unit->{git}->{git_dir} =~ m!/([^/]+)/git/([0-9]+\.git)\z!) 
{ + $pfx = "$1 $2"; # v2 + } else { # v1 + ($pfx) = ($unit->{git}->{git_dir} =~ m!/([^/]+)\z!g); + $pfx //= $unit->{git}->{git_dir}; + } + local $self->{current_info} = "$pfx "; + local $sync->{latest_cmt} = \(my $latest_cmt); + local $sync->{unit} = $unit; + while (my ($f, $at, $ct, $oid, $cmt) = $stk->pop_rec) { + if ($sync->{quit}) { + warn "waiting to quit...\n"; + $all->async_wait_all; + $self->update_last_commit($sync); + return; + } + my $req = { + %$sync, + autime => $at, + cotime => $ct, + oid => $oid, + cur_cmt => $cmt + }; if ($f eq 'm') { if ($sync->{max_size}) { $all->check_async($oid, \&check_size, $req); @@ -1236,7 +1316,7 @@ sub index_todo ($$$) { } } $all->async_wait_all; - $self->update_last_commit($sync, $unit, $stk->{latest_cmt}); + $self->update_last_commit($sync, $stk); } sub xapian_only { @@ -1257,6 +1337,7 @@ sub xapian_only { if ($seq || !$self->{parallel}) { my $shard_end = $self->{shards} - 1; for my $i (0..$shard_end) { + last if $sync->{quit}; index_xap_step($self, $sync, $art_beg + $i); if ($i != $shard_end) { reindex_checkpoint($self, $sync); @@ -1276,11 +1357,19 @@ sub index_sync { $opt //= {}; return xapian_only($self, $opt) if $opt->{xapian_only}; - my $pr = $opt->{-progress}; my $epoch_max; - my $latest = $self->{ibx}->git_dir_latest(\$epoch_max); - return unless defined $latest; + my $latest = $self->{ibx}->git_dir_latest(\$epoch_max) // return; + if ($opt->{'fast-noop'}) { # nanosecond (st_ctim) comparison + use Time::HiRes qw(stat); + if (my @mm = stat("$self->{ibx}->{inboxdir}/msgmap.sqlite3")) { + my $c = $mm[10]; # 10 = ctime (nsec NV) + my @hd = stat("$latest/refs/heads"); + my @pr = stat("$latest/packed-refs"); + return if $c > ($hd[10] // 0) && $c > ($pr[10] // 0); + } + } + my $pr = $opt->{-progress}; my $seq = $opt->{sequential_shard}; my $art_beg; # the NNTP article number we start xapian_only at my $idxlevel = $self->{ibx}->{indexlevel}; @@ -1297,6 +1386,11 @@ sub index_sync { ibx => $self->{ibx}, epoch_max => $epoch_max, }; + my $quit = PublicInbox::SearchIdx::quit_cb($sync); + local $SIG{QUIT} = $quit; + local $SIG{INT} = $quit; + local $SIG{TERM} = $quit; + if (sync_prepare($self, $sync)) { # tmp_clone seems to fail if inside a transaction, so # we rollback here (because we opened {mm} for reading) @@ -1309,13 +1403,13 @@ sub index_sync { # xapian_only works incrementally w/o --reindex if ($seq && !$opt->{reindex}) { - $art_beg = $sync->{mm_tmp}->max; - $art_beg++ if defined($art_beg); + $art_beg = $sync->{mm_tmp}->max || -1; + $art_beg++; } } # work forwards through history - index_todo($self, $sync, $_) for @{$sync->{todo}}; - $self->{oidx}->rethread_done($opt); + index_todo($self, $sync, $_) for @{delete($sync->{todo}) // []}; + $self->{oidx}->rethread_done($opt) unless $sync->{quit}; $self->done; if (my $nr = $sync->{nr}) { @@ -1323,14 +1417,21 @@ sub index_sync { $pr->('all.git '.sprintf($sync->{-regen_fmt}, $$nr)) if $pr; } + my $quit_warn; # deal with Xapian shards sequentially if ($seq && delete($sync->{mm_tmp})) { - $self->{ibx}->{indexlevel} = $idxlevel; - xapian_only($self, $opt, $sync, $art_beg); + if ($sync->{quit}) { + $quit_warn = 1; + } else { + $self->{ibx}->{indexlevel} = $idxlevel; + xapian_only($self, $opt, $sync, $art_beg); + $quit_warn = 1 if $sync->{quit}; + } } # --reindex on the command-line - if ($opt->{reindex} && !ref($opt->{reindex}) && $idxlevel ne 'basic') { + if (!$sync->{quit} && $opt->{reindex} && + !ref($opt->{reindex}) && $idxlevel ne 'basic') { $self->lock_acquire; my $s0 = 
PublicInbox::SearchIdx->new($self->{ibx}, 0, 0); if (my $xdb = $s0->idx_acquire) { @@ -1342,12 +1443,16 @@ sub index_sync { } # reindex does not pick up new changes, so we rerun w/o it: - if ($opt->{reindex}) { + if ($opt->{reindex} && !$sync->{quit}) { my %again = %$opt; $sync = undef; delete @again{qw(rethread reindex -skip_lock)}; index_sync($self, \%again); + $opt->{quit} = $again{quit}; # propagate to caller } + warn <
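
Two of the behavior changes in the patch above lend themselves to small
standalone sketches.  Both are illustrative only: the helper names, paths
and driver code are invented for the examples and are not part of
V2Writable.pm.

The new "fast-noop" branch in index_sync() compares the ctime of the
inbox's msgmap.sqlite3 against the ctime of refs/heads and packed-refs in
the newest git epoch, so an unchanged inbox can return before walking any
history.  A minimal restatement of that check, assuming hypothetical
paths:

    #!/usr/bin/perl
    # sketch of the fast-noop ctime comparison; paths are hypothetical
    use strict;
    use v5.10.1;
    use Time::HiRes qw(stat); # stat() now returns sub-second timestamps

    sub probably_unchanged {
        my ($msgmap, $latest_git_dir) = @_;
        my @mm = stat($msgmap) or return 0; # no index yet => must index
        my $c = $mm[10]; # field 10 of stat() is ctime
        my @hd = stat("$latest_git_dir/refs/heads");
        my @pr = stat("$latest_git_dir/packed-refs");
        # true if nothing has touched refs since the index was written
        $c > ($hd[10] // 0) && $c > ($pr[10] // 0);
    }

    say probably_unchanged('/path/to/inbox/msgmap.sqlite3',
            '/path/to/inbox/git/0.git') ? 'fast no-op' : 'needs indexing';

The other recurring change is the {quit} flag: index_sync() installs
SIGQUIT/SIGINT/SIGTERM handlers (via PublicInbox::SearchIdx::quit_cb) and
the long loops in sync_prepare(), index_todo(), index_xap_step() and
xapian_only() poll $sync->{quit}, so an interrupted run stops between
units of work rather than being killed mid-write.  The shape of that
pattern, with a made-up unit of work standing in for $sync->{todo}:

    #!/usr/bin/perl
    # sketch of flag-based, interruptible batch work
    use strict;
    my $sync = { quit => 0 };
    my $quit = sub { $sync->{quit} = 1; warn "quitting...\n" };
    # V2Writable uses "local $SIG{...}" so handlers are restored on return
    $SIG{QUIT} = $quit;
    $SIG{INT} = $quit;
    $SIG{TERM} = $quit;
    for my $unit (0 .. 99) {
        last if $sync->{quit}; # same check as "return 0 if $sync->{quit}"
        sleep 1; # placeholder for indexing one unit of work
    }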