X-Git-Url: http://www.git.stargrave.org/?a=blobdiff_plain;f=lib%2FPublicInbox%2FV2Writable.pm;h=ed5182ae846029a5af0837060335c7d7d8a45066;hb=23af251dd607c4e75ab1e68063f2c885c48cc035;hp=f575ba111580508c45ed1fac51d35ea7ad046e1a;hpb=a13f8b713f991c16ae9df86b28a0cb48d84cc8fc;p=public-inbox.git diff --git a/lib/PublicInbox/V2Writable.pm b/lib/PublicInbox/V2Writable.pm index f575ba11..ed5182ae 100644 --- a/lib/PublicInbox/V2Writable.pm +++ b/lib/PublicInbox/V2Writable.pm @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2020 all contributors +# Copyright (C) 2018-2021 all contributors # License: AGPL-3.0+ # This interface wraps and mimics PublicInbox::Import @@ -6,24 +6,28 @@ package PublicInbox::V2Writable; use strict; use v5.10.1; -use parent qw(PublicInbox::Lock); +use parent qw(PublicInbox::Lock PublicInbox::IPC); use PublicInbox::SearchIdxShard; +use PublicInbox::IPC; use PublicInbox::Eml; use PublicInbox::Git; use PublicInbox::Import; +use PublicInbox::MultiGit; use PublicInbox::MID qw(mids references); -use PublicInbox::ContentHash qw(content_hash content_digest); +use PublicInbox::ContentHash qw(content_hash content_digest git_sha); use PublicInbox::InboxWritable; use PublicInbox::OverIdx; use PublicInbox::Msgmap; -use PublicInbox::Spawn qw(spawn popen_rd); -use PublicInbox::SearchIdx qw(log2stack crlf_adjust is_ancestor check_size); +use PublicInbox::Spawn qw(spawn popen_rd run_die); +use PublicInbox::Search; +use PublicInbox::SearchIdx qw(log2stack is_ancestor check_size is_bad_blob); use IO::Handle; # ->autoflush use File::Temp (); +use POSIX (); my $OID = qr/[a-f0-9]{40,}/; # an estimate of the post-packed size to the raw uncompressed size -my $PACKING_FACTOR = 0.4; +our $PACKING_FACTOR = 0.4; # SATA storage lags behind what CPUs are capable of, so relying on # nproc(1) can be misleading and having extra Xapian shards is a @@ -33,27 +37,13 @@ my $PACKING_FACTOR = 0.4; # to increase Xapian shards our $NPROC_MAX_DEFAULT = 4; -sub detect_nproc () { - # getconf(1) is POSIX, but *NPROCESSORS* vars are not - for (qw(_NPROCESSORS_ONLN NPROCESSORS_ONLN)) { - `getconf $_ 2>/dev/null` =~ /^(\d+)$/ and return $1; - } - for my $nproc (qw(nproc gnproc)) { # GNU coreutils nproc - `$nproc 2>/dev/null` =~ /^(\d+)$/ and return $1; - } - - # should we bother with `sysctl hw.ncpu`? Those only give - # us total processor count, not online processor count. 
- undef -} - sub nproc_shards ($) { my ($creat_opt) = @_; my $n = $creat_opt->{nproc} if ref($creat_opt) eq 'HASH'; $n //= $ENV{NPROC}; if (!$n) { # assume 2 cores if not detectable or zero - state $NPROC_DETECTED = detect_nproc() || 2; + state $NPROC_DETECTED = PublicInbox::IPC::detect_nproc() || 2; $n = $NPROC_DETECTED; $n = $NPROC_MAX_DEFAULT if $n > $NPROC_MAX_DEFAULT; } @@ -67,9 +57,13 @@ sub count_shards ($) { my ($self) = @_; # always load existing shards in case core count changes: # Also, shard count may change while -watch is running - my $srch = $self->{ibx}->search or return 0; - delete $self->{ibx}->{search}; - $srch->{nshard} // 0 + if (my $ibx = $self->{ibx}) { + my $srch = $ibx->search or return 0; + delete $ibx->{search}; + $srch->{nshard} // 0 + } else { # ExtSearchIdx + $self->{nshard} = scalar($self->xdb_shards_flat); + } } sub new { @@ -79,18 +73,14 @@ sub new { $v2ibx = PublicInbox::InboxWritable->new($v2ibx); my $dir = $v2ibx->assert_usable_dir; unless (-d $dir) { - if ($creat) { - require File::Path; - File::Path::mkpath($dir); - } else { - die "$dir does not exist\n"; - } + die "$dir does not exist\n" if !$creat; + require File::Path; + File::Path::mkpath($dir); } - $v2ibx->umask_prepare; - my $xpfx = "$dir/xap" . PublicInbox::Search::SCHEMA_VERSION; my $self = { ibx => $v2ibx, + mg => PublicInbox::MultiGit->new($dir, 'all.git', 'git'), im => undef, # PublicInbox::Import parallel => 1, transact_bytes => 0, @@ -117,12 +107,9 @@ sub init_inbox { } $self->idx_init; $self->{mm}->skip_artnum($skip_artnum) if defined $skip_artnum; - my $epoch_max = -1; - git_dir_latest($self, \$epoch_max); - if (defined $skip_epoch && $epoch_max == -1) { - $epoch_max = $skip_epoch; - } - $self->git_init($epoch_max >= 0 ? $epoch_max : 0); + my $max = $self->{ibx}->max_git_epoch; + $max = $skip_epoch if (defined($skip_epoch) && !defined($max)); + $self->{mg}->add_epoch($max // 0); $self->done; } @@ -139,13 +126,14 @@ sub idx_shard ($$) { } # indexes a message, returns true if checkpointing is needed -sub do_idx ($$$$) { - my ($self, $msgref, $mime, $smsg) = @_; - $smsg->{bytes} = $smsg->{raw_bytes} + crlf_adjust($$msgref); - $self->{oidx}->add_overview($mime, $smsg); - my $idx = idx_shard($self, $smsg->{num}); - $idx->index_raw($msgref, $mime, $smsg); - my $n = $self->{transact_bytes} += $smsg->{raw_bytes}; +sub do_idx ($$$) { + my ($self, $eml, $smsg) = @_; + $self->{oidx}->add_overview($eml, $smsg); + if ($self->{-need_xapian}) { + my $idx = idx_shard($self, $smsg->{num}); + $idx->index_eml($eml, $smsg); + } + my $n = $self->{transact_bytes} += $smsg->{bytes}; $n >= $self->{batch_bytes}; } @@ -173,8 +161,7 @@ sub _add { $cmt = $im->get_mark($cmt); $self->{last_commit}->[$self->{epoch_max}] = $cmt; - my $msgref = delete $smsg->{-raw_email}; - if (do_idx($self, $msgref, $mime, $smsg)) { + if (do_idx($self, $mime, $smsg)) { $self->checkpoint; } @@ -264,35 +251,29 @@ sub _idx_init { # with_umask callback $self->{shards} = $nshards if $nshards && $nshards != $self->{shards}; $self->{batch_bytes} = $opt->{batch_size} // $PublicInbox::SearchIdx::BATCH_BYTES; - $self->{batch_bytes} *= $self->{shards} if $self->{parallel}; # need to create all shards before initializing msgmap FD # idx_shards must be visible to all forked processes my $max = $self->{shards} - 1; my $idx = $self->{idx_shards} = []; push @$idx, PublicInbox::SearchIdxShard->new($self, $_) for (0..$max); + $self->{-need_xapian} = $idx->[0]->need_xapian; + + # SearchIdxShard may do their own flushing, so don't scale + # until after 
forking + $self->{batch_bytes} *= $self->{shards} if $self->{parallel}; + my $ibx = $self->{ibx} or return; # ExtIdxSearch # Now that all subprocesses are up, we can open the FDs # for SQLite: - my $mm = $self->{mm} = PublicInbox::Msgmap->new_file( - "$ibx->{inboxdir}/msgmap.sqlite3", - $ibx->{-no_fsync} ? 2 : 1); + my $mm = $self->{mm} = PublicInbox::Msgmap->new_file($ibx, 1); $mm->{dbh}->begin_work; } sub parallel_init ($$) { my ($self, $indexlevel) = @_; - if (($indexlevel // 'full') eq 'basic') { - $self->{parallel} = 0; - } else { - pipe(my ($r, $w)) or die "pipe failed: $!"; - # pipe for barrier notifications doesn't need to be big, - # 1031: F_SETPIPE_SZ - fcntl($w, 1031, 4096) if $^O eq 'linux'; - $self->{bnote} = [ $r, $w ]; - $w->autoflush(1); - } + $self->{parallel} = 0 if ($indexlevel // 'full') eq 'basic'; } # idempotent @@ -308,7 +289,6 @@ sub idx_init { $ibx->git->cleanup; parallel_init($self, $ibx->{indexlevel}); - $ibx->umask_prepare; $ibx->with_umask(\&_idx_init, $self, $opt); } @@ -318,14 +298,10 @@ sub idx_init { sub _replace_oids ($$$) { my ($self, $mime, $replace_map) = @_; $self->done; - my $pfx = "$self->{ibx}->{inboxdir}/git"; + my $ibx = $self->{ibx}; + my $pfx = "$ibx->{inboxdir}/git"; my $rewrites = []; # epoch => commit - my $max = $self->{epoch_max}; - - unless (defined($max)) { - defined(my $latest = git_dir_latest($self, \$max)) or return; - $self->{epoch_max} = $max; - } + my $max = $self->{epoch_max} //= $ibx->max_git_epoch // return; foreach my $i (0..$max) { my $git_dir = "$pfx/$i.git"; @@ -420,7 +396,7 @@ sub rewrite_internal ($$;$$$) { } else { # ->purge or ->remove $self->{mm}->num_delete($num); } - unindex_oid_remote($self, $oid, $mid); + unindex_oid_aux($self, $oid, $mid); } } @@ -468,23 +444,6 @@ sub purge { $rewritten->{rewrites} } -# returns the git object_id of $fh, does not write the object to FS -sub git_hash_raw ($$) { - my ($self, $raw) = @_; - # grab the expected OID we have to reindex: - pipe(my($in, $w)) or die "pipe: $!"; - my $git_dir = $self->git->{git_dir}; - my $cmd = ['git', "--git-dir=$git_dir", qw(hash-object --stdin)]; - my $r = popen_rd($cmd, undef, { 0 => $in }); - print $w $$raw or die "print \$w: $!"; - close $w or die "close \$w: $!"; - local $/ = "\n"; - chomp(my $oid = <$r>); - close $r or die "git hash-object failed: $?"; - $oid =~ /\A$OID\z/ or die "OID not expected: $oid"; - $oid; -} - sub _check_mids_match ($$$) { my ($old_list, $new_list, $hdrs) = @_; my %old_mids = map { $_ => 1 } @$old_list; @@ -519,7 +478,7 @@ sub replace ($$$) { PublicInbox::Import::drop_unwanted_headers($new_mime); my $raw = $new_mime->as_string; - my $expect_oid = git_hash_raw($self, \$raw); + my $expect_oid = git_sha(1, \$raw)->hexdigest; my $rewritten = _replace($self, $old_mime, $new_mime, \$raw) or return; my $need_reindex = $rewritten->{need_reindex}; @@ -547,13 +506,13 @@ W: $list for my $smsg (@$need_reindex) { my $new_smsg = bless { blob => $blob, - raw_bytes => $bytes, num => $smsg->{num}, mid => $smsg->{mid}, }, 'PublicInbox::Smsg'; my $sync = { autime => $smsg->{ds}, cotime => $smsg->{ts} }; $new_smsg->populate($new_mime, $sync); - do_idx($self, \$raw, $new_mime, $new_smsg); + $new_smsg->set_bytes($raw, $bytes); + do_idx($self, $new_mime, $new_smsg); } $rewritten->{rewrites}; } @@ -564,7 +523,7 @@ sub last_epoch_commit ($$;$) { $self->{mm}->last_commit_xap($v, $i, $cmt); } -sub set_last_commits ($) { +sub set_last_commits ($) { # this is NOT for ExtSearchIdx my ($self) = @_; defined(my $epoch_max = $self->{epoch_max}) or return; my 
$last_commit = $self->{last_commit}; @@ -575,24 +534,6 @@ sub set_last_commits ($) { } } -sub barrier_init { - my ($self, $n) = @_; - $self->{bnote} or return; - --$n; - my $barrier = { map { $_ => 1 } (0..$n) }; -} - -sub barrier_wait { - my ($self, $barrier) = @_; - my $bnote = $self->{bnote} or return; - my $r = $bnote->[0]; - while (scalar keys %$barrier) { - defined(my $l = readline($r)) or die "EOF on barrier_wait: $!"; - $l =~ /\Abarrier (\d+)/ or die "bad line on barrier_wait: $l"; - delete $barrier->{$1} or die "bad shard[$1] on barrier wait"; - } -} - # public sub checkpoint ($;$) { my ($self, $wait) = @_; @@ -606,34 +547,50 @@ sub checkpoint ($;$) { } my $shards = $self->{idx_shards}; if ($shards) { - my $dbh = $self->{mm}->{dbh}; + my $dbh = $self->{mm}->{dbh} if $self->{mm}; # SQLite msgmap data is second in importance - $dbh->commit; + $dbh->commit if $dbh; + eval { $dbh->do('PRAGMA optimize') }; # SQLite overview is third $self->{oidx}->commit_lazy; # Now deal with Xapian - if ($wait) { - my $barrier = $self->barrier_init(scalar @$shards); - # each shard needs to issue a barrier command - $_->shard_barrier for @$shards; + # start commit_txn_lazy asynchronously on all parallel shards + # (non-parallel waits here) + $_->ipc_do('commit_txn_lazy') for @$shards; + + # transactions started on parallel shards, + # wait for them by issuing an echo command (echo can only + # run after commit_txn_lazy is done) + if ($wait && $self->{parallel}) { + my $i = 0; + for my $shard (@$shards) { + my $echo = $shard->ipc_do('echo', $i); + $echo == $i or die <<""; +shard[$i] bad echo:$echo != $i waiting for txn commit + + ++$i; + } + } - # wait for each Xapian shard - $self->barrier_wait($barrier); - } else { - $_->shard_commit for @$shards; + my $midx = $self->{midx}; # misc index + if ($midx) { + $midx->commit_txn; + $PublicInbox::Search::X{CLOEXEC_UNSET} and + $self->git->cleanup; } # last_commit is special, don't commit these until - # remote shards are done: - $dbh->begin_work; + # Xapian shards are done: + $dbh->begin_work if $dbh; set_last_commits($self); - $dbh->commit; - - $dbh->begin_work; + if ($dbh) { + $dbh->commit; + $dbh->begin_work; + } } $self->{total_bytes} += $self->{transact_bytes}; $self->{transact_bytes} = 0; @@ -664,6 +621,11 @@ sub done { eval { $mm->{dbh}->$m }; $err .= "msgmap $m: $@\n" if $@; } + if ($self->{oidx} && $self->{oidx}->{dbh} && $err) { + eval { $self->{oidx}->rollback_lazy }; + $err .= "overview rollback: $@\n" if $@; + } + my $shards = delete $self->{idx_shards}; if ($shards) { for (@$shards) { @@ -673,7 +635,7 @@ sub done { } eval { $self->{oidx}->dbh_close }; $err .= "over close: $@\n" if $@; - delete $self->{bnote}; + delete $self->{midx}; my $nbytes = $self->{total_bytes}; $self->{total_bytes} = 0; $self->lock_release(!!$nbytes) if $shards; @@ -681,88 +643,6 @@ sub done { die $err if $err; } -sub write_alternates ($$$) { - my ($info_dir, $mode, $out) = @_; - my $fh = File::Temp->new(TEMPLATE => 'alt-XXXXXXXX', DIR => $info_dir); - my $tmp = $fh->filename; - print $fh @$out or die "print $tmp: $!\n"; - chmod($mode, $fh) or die "fchmod $tmp: $!\n"; - close $fh or die "close $tmp $!\n"; - my $alt = "$info_dir/alternates"; - rename($tmp, $alt) or die "rename $tmp => $alt: $!\n"; - $fh->unlink_on_destroy(0); -} - -sub fill_alternates ($$) { - my ($self, $epoch) = @_; - - my $pfx = "$self->{ibx}->{inboxdir}/git"; - my $all = "$self->{ibx}->{inboxdir}/all.git"; - PublicInbox::Import::init_bare($all) unless -d $all; - my $info_dir = "$all/objects/info"; - my 
$alt = "$info_dir/alternates"; - my (%alt, $new); - my $mode = 0644; - if (-e $alt) { - open(my $fh, '<', $alt) or die "open < $alt: $!\n"; - $mode = (stat($fh))[2] & 07777; - - # we assign a sort score to every alternate and favor - # the newest (highest numbered) one because loose objects - # require scanning epochs and only the latest epoch is - # expected to see loose objects - my $score; - my $other = 0; # in case admin adds non-epoch repos - %alt = map {; - if (m!\A\Q../../\E([0-9]+)\.git/objects\z!) { - $score = $1 + 0; - } else { - $score = --$other; - } - $_ => $score; - } split(/\n+/, do { local $/; <$fh> }); - } - - foreach my $i (0..$epoch) { - my $dir = "../../git/$i.git/objects"; - if (!exists($alt{$dir}) && -d "$pfx/$i.git") { - $alt{$dir} = $i; - $new = 1; - } - } - return unless $new; - write_alternates($info_dir, $mode, - [join("\n", sort { $alt{$b} <=> $alt{$a} } keys %alt), "\n"]); -} - -sub git_init { - my ($self, $epoch) = @_; - my $git_dir = "$self->{ibx}->{inboxdir}/git/$epoch.git"; - PublicInbox::Import::init_bare($git_dir); - my @cmd = (qw/git config/, "--file=$git_dir/config", - 'include.path', '../../all.git/config'); - PublicInbox::Import::run_die(\@cmd); - fill_alternates($self, $epoch); - $git_dir -} - -sub git_dir_latest { - my ($self, $max) = @_; - $$max = -1; - my $pfx = "$self->{ibx}->{inboxdir}/git"; - return unless -d $pfx; - my $latest; - opendir my $dh, $pfx or die "opendir $pfx: $!\n"; - while (defined(my $git_dir = readdir($dh))) { - $git_dir =~ m!\A([0-9]+)\.git\z! or next; - if ($1 > $$max) { - $$max = $1; - $latest = "$pfx/$git_dir"; - } - } - $latest; -} - sub importer { my ($self) = @_; my $im = $self->{im}; @@ -774,14 +654,14 @@ sub importer { $im->done; $im = undef; $self->checkpoint; - my $git_dir = $self->git_init(++$self->{epoch_max}); - my $git = PublicInbox::Git->new($git_dir); + my $dir = $self->{mg}->add_epoch(++$self->{epoch_max}); + my $git = PublicInbox::Git->new($dir); return $self->import_init($git, 0); } } my $epoch = 0; my $max; - my $latest = git_dir_latest($self, \$max); + my $latest = $self->{ibx}->git_dir_latest(\$max); if (defined $latest) { my $git = PublicInbox::Git->new($latest); my $packed_bytes = $git->packed_bytes; @@ -795,8 +675,8 @@ sub importer { } } $self->{epoch_max} = $epoch; - $latest = $self->git_init($epoch); - $self->import_init(PublicInbox::Git->new($latest), 0); + my $dir = $self->{mg}->add_epoch($epoch); + $self->import_init(PublicInbox::Git->new($dir), 0); } sub import_init { @@ -813,11 +693,11 @@ sub import_init { sub diff ($$$) { my ($mid, $cur, $new) = @_; - my $ah = File::Temp->new(TEMPLATE => 'email-cur-XXXXXXXX', TMPDIR => 1); + my $ah = File::Temp->new(TEMPLATE => 'email-cur-XXXX', TMPDIR => 1); print $ah $cur->as_string or die "print: $!"; $ah->flush or die "flush: $!"; PublicInbox::Import::drop_unwanted_headers($new); - my $bh = File::Temp->new(TEMPLATE => 'email-new-XXXXXXXX', TMPDIR => 1); + my $bh = File::Temp->new(TEMPLATE => 'email-new-XXXX', TMPDIR => 1); print $bh $new->as_string or die "print: $!"; $bh->flush or die "flush: $!"; my $cmd = [ qw(diff -u), $ah->filename, $bh->filename ]; @@ -858,43 +738,62 @@ sub content_exists ($$$) { sub atfork_child { my ($self) = @_; - if (my $shards = $self->{idx_shards}) { - $_->atfork_child foreach @$shards; + if (my $older_siblings = $self->{idx_shards}) { + $_->ipc_sibling_atfork_child for @$older_siblings; } if (my $im = $self->{im}) { $im->atfork_child; } - die "unexpected mm" if $self->{mm}; - close $self->{bnote}->[0] or die "close bnote[0]: 
$!\n"; - $self->{bnote}->[1]; + die "BUG: unexpected mm" if $self->{mm}; } sub reindex_checkpoint ($$) { my ($self, $sync) = @_; - $self->git->cleanup; # *async_wait + $self->git->async_wait_all; + $self->update_last_commit($sync); ${$sync->{need_checkpoint}} = 0; my $mm_tmp = $sync->{mm_tmp}; $mm_tmp->atfork_prepare if $mm_tmp; - $self->done; # release lock + die 'BUG: {im} during reindex' if $self->{im}; + if ($self->{ibx_map} && !$sync->{checkpoint_unlocks}) { + checkpoint($self, 1); # no need to release lock on pure index + } else { + $self->done; # release lock + } - if (my $pr = $sync->{-opt}->{-progress}) { + if (my $pr = $sync->{-regen_fmt} ? $sync->{-opt}->{-progress} : undef) { $pr->(sprintf($sync->{-regen_fmt}, ${$sync->{nr}})); } # allow -watch or -mda to write... $self->idx_init($sync->{-opt}); # reacquire lock + if (my $intvl = $sync->{check_intvl}) { # eidx + $sync->{next_check} = PublicInbox::DS::now() + $intvl; + } $mm_tmp->atfork_parent if $mm_tmp; } +sub index_finalize ($$) { + my ($arg, $index) = @_; + ++$arg->{self}->{nidx}; + if (defined(my $cur = $arg->{cur_cmt})) { + ${$arg->{latest_cmt}} = $cur; + } elsif ($index) { + die 'BUG: {cur_cmt} missing'; + } # else { unindexing @leftovers doesn't set {cur_cmt} +} + sub index_oid { # cat_async callback my ($bref, $oid, $type, $size, $arg) = @_; - return if $size == 0; # purged + is_bad_blob($oid, $type, $size, $arg->{oid}) and + return index_finalize($arg, 1); # size == 0 purged returns here + my $self = $arg->{self}; + local $self->{current_info} = "$self->{current_info} $oid"; my ($num, $mid0); my $eml = PublicInbox::Eml->new($$bref); my $mids = mids($eml); my $chash = content_hash($eml); - my $self = $arg->{v2w}; if (scalar(@$mids) == 0) { warn "E: $oid has no Message-ID, skipping\n"; @@ -902,16 +801,20 @@ sub index_oid { # cat_async callback } # {unindexed} is unlikely - if ((my $unindexed = $arg->{unindexed}) && scalar(@$mids) == 1) { - $num = delete($unindexed->{$mids->[0]}); + if (my $unindexed = $arg->{unindexed}) { + my $oidbin = pack('H*', $oid); + my $u = $unindexed->{$oidbin}; + ($num, $mid0) = splice(@$u, 0, 2) if $u; if (defined $num) { - $mid0 = $mids->[0]; $self->{mm}->mid_set($num, $mid0); - delete($arg->{unindexed}) if !keys(%$unindexed); + if (scalar(@$u) == 0) { # done with current OID + delete $unindexed->{$oidbin}; + delete($arg->{unindexed}) if !keys(%$unindexed); + } } } + my $oidx = $self->{oidx}; if (!defined($num)) { # reuse if reindexing (or duplicates) - my $oidx = $self->{oidx}; for my $mid (@$mids) { ($num, $mid0) = $oidx->num_mid0_for_oid($oid, $mid); last if defined $num; @@ -919,6 +822,11 @@ sub index_oid { # cat_async callback } $mid0 //= do { # is this a number we got before? $num = $arg->{mm_tmp}->num_for($mids->[0]); + + # don't clobber existing if Message-ID is reused: + if (my $x = defined($num) ? $oidx->get_art($num) : undef) { + undef($num) if $x->{blob} ne $oid; + } defined($num) ? 
$mids->[0] : undef; }; if (!defined($num)) { @@ -952,45 +860,53 @@ sub index_oid { # cat_async callback } ++${$arg->{nr}}; my $smsg = bless { - raw_bytes => $size, num => $num, blob => $oid, mid => $mid0, }, 'PublicInbox::Smsg'; $smsg->populate($eml, $arg); - if (do_idx($self, $bref, $eml, $smsg)) { + $smsg->set_bytes($$bref, $size); + if (do_idx($self, $eml, $smsg)) { ${$arg->{need_checkpoint}} = 1; } + index_finalize($arg, 1); } # only update last_commit for $i on reindex iff newer than current sub update_last_commit { - my ($self, $git, $i, $cmt) = @_; - my $last = last_epoch_commit($self, $i); - if (defined $last && is_ancestor($git, $last, $cmt)) { - my @cmd = (qw(rev-list --count), "$last..$cmt"); - chomp(my $n = $git->qx(@cmd)); + my ($self, $sync, $stk) = @_; + my $unit = $sync->{unit} // return; + my $latest_cmt = $stk ? $stk->{latest_cmt} : ${$sync->{latest_cmt}}; + defined($latest_cmt) or return; + my $last = last_epoch_commit($self, $unit->{epoch}); + if (defined $last && is_ancestor($self->git, $last, $latest_cmt)) { + my @cmd = (qw(rev-list --count), "$last..$latest_cmt"); + chomp(my $n = $unit->{git}->qx(@cmd)); return if $n ne '' && $n == 0; } - last_epoch_commit($self, $i, $cmt); -} + # don't rewind if --{since,until,before,after} are in use + return if (defined($last) && + grep(defined, @{$sync->{-opt}}{qw(since until)}) && + is_ancestor($self->git, $latest_cmt, $last)); -sub git_dir_n ($$) { "$_[0]->{ibx}->{inboxdir}/git/$_[1].git" } + last_epoch_commit($self, $unit->{epoch}, $latest_cmt); +} -sub last_commits ($$) { - my ($self, $epoch_max) = @_; +sub last_commits { + my ($self, $sync) = @_; my $heads = []; - for (my $i = $epoch_max; $i >= 0; $i--) { + for (my $i = $sync->{epoch_max}; $i >= 0; $i--) { $heads->[$i] = last_epoch_commit($self, $i); } $heads; } # returns a revision range for git-log(1) -sub log_range ($$$$$) { - my ($self, $sync, $git, $i, $tip) = @_; +sub log_range ($$$) { + my ($sync, $unit, $tip) = @_; my $opt = $sync->{-opt}; my $pr = $opt->{-progress} if (($opt->{verbose} || 0) > 1); + my $i = $unit->{epoch}; my $cur = $sync->{ranges}->[$i] or do { $pr->("$i.git indexing all of $tip\n") if $pr; return $tip; # all of it @@ -1004,7 +920,8 @@ sub log_range ($$$$$) { my $range = "$cur..$tip"; $pr->("$i.git checking contiguity... ") if $pr; - if (is_ancestor($git, $cur, $tip)) { # common case + my $git = $unit->{git}; + if (is_ancestor($sync->{self}->git, $cur, $tip)) { # common case $pr->("OK\n") if $pr; my $n = $git->qx(qw(rev-list --count), $range); chomp($n); @@ -1029,61 +946,102 @@ Rewritten history? 
(in $git->{git_dir}) warn "discarding history at $cur\n"; } warn <<""; -reindexing $git->{git_dir} starting at -$range - - $sync->{unindex_range}->{$i} = "$base..$cur"; +reindexing $git->{git_dir} +starting at $range + + # $cur^0 may no longer exist if pruned by git + if ($git->qx(qw(rev-parse -q --verify), "$cur^0")) { + $unit->{unindex_range} = "$base..$cur"; + } elsif ($base && $git->qx(qw(rev-parse -q --verify), $base)) { + $unit->{unindex_range} = "$base.."; + } else { + warn "W: unable to unindex before $range\n"; + } } $range; } -sub sync_prepare ($$$) { - my ($self, $sync, $epoch_max) = @_; +# overridden by ExtSearchIdx +sub artnum_max { $_[0]->{mm}->num_highwater } + +sub sync_prepare ($$) { + my ($self, $sync) = @_; + $sync->{ranges} = sync_ranges($self, $sync); my $pr = $sync->{-opt}->{-progress}; my $regen_max = 0; - my $head = $self->{ibx}->{ref_head} || 'HEAD'; - - # reindex stops at the current heads and we later rerun index_sync - # without {reindex} - my $reindex_heads = $self->last_commits($epoch_max) if $sync->{reindex}; - - for (my $i = $epoch_max; $i >= 0; $i--) { - my $git_dir = git_dir_n($self, $i); + my $head = $sync->{ibx}->{ref_head} || 'HEAD'; + my $pfx; + if ($pr) { + ($pfx) = ($sync->{ibx}->{inboxdir} =~ m!([^/]+)\z!g); + $pfx //= $sync->{ibx}->{inboxdir}; + } + + my $reindex_heads; + if ($self->{ibx_map}) { + # ExtSearchIdx won't index messages unless they're in + # over.sqlite3 for a given inbox, so don't read beyond + # what's in the per-inbox index. + $reindex_heads = []; + my $v = PublicInbox::Search::SCHEMA_VERSION; + my $mm = $sync->{ibx}->mm; + for my $i (0..$sync->{epoch_max}) { + $reindex_heads->[$i] = $mm->last_commit_xap($v, $i); + } + } elsif ($sync->{reindex}) { # V2 inbox + # reindex stops at the current heads and we later + # rerun index_sync without {reindex} + $reindex_heads = $self->last_commits($sync); + } + if ($sync->{max_size} = $sync->{-opt}->{max_size}) { + $sync->{index_oid} = $self->can('index_oid'); + } + my $git_pfx = "$sync->{ibx}->{inboxdir}/git"; + for (my $i = $sync->{epoch_max}; $i >= 0; $i--) { + my $git_dir = "$git_pfx/$i.git"; -d $git_dir or next; # missing epochs are fine my $git = PublicInbox::Git->new($git_dir); + my $unit = { git => $git, epoch => $i }; + my $tip; if ($reindex_heads) { - $head = $reindex_heads->[$i] or next; + $tip = $head = $reindex_heads->[$i] or next; + } else { + $tip = $git->qx(qw(rev-parse -q --verify), $head); + next if $?; # new repo + chomp $tip; } - chomp(my $tip = $git->qx(qw(rev-parse -q --verify), $head)); - - next if $?; # new repo - my $range = log_range($self, $sync, $git, $i, $tip) or next; + my $range = log_range($sync, $unit, $tip) or next; # can't use 'rev-list --count' if we use --diff-filter - $pr->("$i.git counting $range ... ") if $pr; + $pr->("$pfx $i.git counting $range ... ") if $pr; # Don't bump num_highwater on --reindex by using {D}. # We intentionally do NOT use {D} in the non-reindex case # because we want NNTP article number gaps from unindexed # messages to show up in mirrors, too. $sync->{D} //= $sync->{reindex} ? {} : undef; # OID_BIN => NR - my $stk = log2stack($sync, $git, $range, $self->{ibx}); + my $stk = log2stack($sync, $git, $range); + return 0 if $sync->{quit}; my $nr = $stk ? 
$stk->num_records : 0; $pr->("$nr\n") if $pr; - $sync->{stacks}->[$i] = $stk if $stk; + $unit->{stack} = $stk; # may be undef + unshift @{$sync->{todo}}, $unit; $regen_max += $nr; } + return 0 if $sync->{quit}; # XXX this should not happen unless somebody bypasses checks in # our code and blindly injects "d" file history into git repos if (my @leftovers = keys %{delete($sync->{D}) // {}}) { warn('W: unindexing '.scalar(@leftovers)." leftovers\n"); - my $arg = { v2w => $self }; + local $self->{current_info} = 'leftover '; + my $unindex_oid = $self->can('unindex_oid'); for my $oid (@leftovers) { + last if $sync->{quit}; $oid = unpack('H*', $oid); - $self->{current_info} = "leftover $oid"; - $self->git->cat_async($oid, \&unindex_oid, $arg); + my $req = { %$sync, oid => $oid }; + $self->git->cat_async($oid, $unindex_oid, $req); } - $self->git->cat_async_wait; + $self->git->async_wait_all; } + return 0 if $sync->{quit}; if (!$regen_max) { $sync->{-regen_fmt} = "%u/?\n"; return 0; @@ -1095,22 +1053,25 @@ sub sync_prepare ($$$) { $sync->{-regen_fmt} = "% ${pad}u/$regen_max\n"; $sync->{nr} = \(my $nr = 0); return -1 if $sync->{reindex}; - $regen_max + $self->{mm}->num_highwater() || 0; + $regen_max + $self->artnum_max || 0; } -sub unindex_oid_remote ($$$) { +sub unindex_oid_aux ($$$) { my ($self, $oid, $mid) = @_; my @removed = $self->{oidx}->remove_oid($oid, $mid); + return unless $self->{-need_xapian}; for my $num (@removed) { - my $idx = idx_shard($self, $num); - $idx->shard_remove($oid, $num); + idx_shard($self, $num)->ipc_do('xdb_remove', $num); } } sub unindex_oid ($$;$) { # git->cat_async callback - my ($bref, $oid, $type, $size, $sync) = @_; - my $self = $sync->{v2w}; - my $unindexed = $sync->{in_unindex} ? $sync->{unindexed} : undef; + my ($bref, $oid, $type, $size, $arg) = @_; + is_bad_blob($oid, $type, $size, $arg->{oid}) and + return index_finalize($arg, 0); + my $self = $arg->{self}; + local $self->{current_info} = "$self->{current_info} $oid"; + my $unindexed = $arg->{in_unindex} ? 
$arg->{unindexed} : undef; my $mm = $self->{mm}; my $mids = mids(PublicInbox::Eml->new($bref)); undef $$bref; @@ -1126,51 +1087,55 @@ sub unindex_oid ($$;$) { # git->cat_async callback warn "BUG: multiple articles linked to $oid\n", join(',',sort keys %gone), "\n"; } - foreach my $num (keys %gone) { + # reuse (num => mid) mapping in ascending numeric order + for my $num (sort { $a <=> $b } keys %gone) { + $num += 0; if ($unindexed) { my $mid0 = $mm->mid_for($num); - $unindexed->{$mid0} = $num; + my $oidbin = pack('H*', $oid); + push @{$unindexed->{$oidbin}}, $num, $mid0; } $mm->num_delete($num); } - unindex_oid_remote($self, $oid, $mid); + unindex_oid_aux($self, $oid, $mid); } + index_finalize($arg, 0); } sub git { $_[0]->{ibx}->git } # this is rare, it only happens when we get discontiguous history in # a mirror because the source used -purge or -edit -sub unindex ($$$$) { - my ($self, $sync, $git, $unindex_range) = @_; - my $unindexed = $sync->{unindexed} //= {}; # $mid0 => $num +sub unindex_todo ($$$) { + my ($self, $sync, $unit) = @_; + my $unindex_range = delete($unit->{unindex_range}) // return; + my $unindexed = $sync->{unindexed} //= {}; # $oidbin => [$num, $mid0] my $before = scalar keys %$unindexed; # order does not matter, here: - my @cmd = qw(log --raw -r - --no-notes --no-color --no-abbrev --no-renames); - my $fh = $git->popen(@cmd, $unindex_range); + my $fh = $unit->{git}->popen(qw(log --raw -r --no-notes --no-color + --no-abbrev --no-renames), $unindex_range); local $sync->{in_unindex} = 1; + my $unindex_oid = $self->can('unindex_oid'); while (<$fh>) { /\A:\d{6} 100644 $OID ($OID) [AM]\tm$/o or next; - $self->git->cat_async($1, \&unindex_oid, $sync); + $self->git->cat_async($1, $unindex_oid, { %$sync, oid => $1 }); } close $fh or die "git log failed: \$?=$?"; - $self->git->cat_async_wait; + $self->git->async_wait_all; return unless $sync->{-opt}->{prune}; my $after = scalar keys %$unindexed; return if $before == $after; # ensure any blob can not longer be accessed via dumb HTTP - PublicInbox::Import::run_die(['git', "--git-dir=$git->{git_dir}", + run_die(['git', "--git-dir=$unit->{git}->{git_dir}", qw(-c gc.reflogExpire=now gc --prune=all --quiet)]); } -sub sync_ranges ($$$) { - my ($self, $sync, $epoch_max) = @_; +sub sync_ranges ($$) { + my ($self, $sync) = @_; my $reindex = $sync->{reindex}; - - return last_commits($self, $epoch_max) unless $reindex; + return $self->last_commits($sync) unless $reindex; return [] if ref($reindex) ne 'HASH'; my $ranges = $reindex->{from}; # arrayref; @@ -1182,11 +1147,10 @@ sub sync_ranges ($$$) { sub index_xap_only { # git->cat_async callback my ($bref, $oid, $type, $size, $smsg) = @_; - my $self = $smsg->{v2w}; + my $self = delete $smsg->{self}; my $idx = idx_shard($self, $smsg->{num}); - $smsg->{raw_bytes} = $size; - $idx->index_raw($bref, undef, $smsg); - $self->{transact_bytes} += $size; + $idx->index_eml(PublicInbox::Eml->new($bref), $smsg); + $self->{transact_bytes} += $smsg->{bytes}; } sub index_xap_step ($$$;$) { @@ -1201,8 +1165,9 @@ sub index_xap_step ($$$;$) { "$beg..$end (% $step)\n"); } for (my $num = $beg; $num <= $end; $num += $step) { + last if $sync->{quit}; my $smsg = $ibx->over->get_art($num) or next; - $smsg->{v2w} = $self; + $smsg->{self} = $self; $ibx->git->cat_async($smsg->{blob}, \&index_xap_only, $smsg); if ($self->{transact_bytes} >= $self->{batch_bytes}) { ${$sync->{nr}} = $num; @@ -1211,41 +1176,58 @@ sub index_xap_step ($$$;$) { } } -sub index_epoch ($$$) { - my ($self, $sync, $i) = @_; - - my $git_dir = 
git_dir_n($self, $i); - -d $git_dir or return; # missing epochs are fine - my $git = PublicInbox::Git->new($git_dir); - if (my $unindex_range = delete $sync->{unindex_range}->{$i}) { # rare - unindex($self, $sync, $git, $unindex_range); - } - defined(my $stk = $sync->{stacks}->[$i]) or return; - $sync->{stacks}->[$i] = undef; +sub index_todo ($$$) { + my ($self, $sync, $unit) = @_; + return if $sync->{quit}; + unindex_todo($self, $sync, $unit); + my $stk = delete($unit->{stack}) or return; my $all = $self->git; - while (my ($f, $at, $ct, $oid) = $stk->pop_rec) { - $self->{current_info} = "$i.git $oid"; + my $index_oid = $self->can('index_oid'); + my $unindex_oid = $self->can('unindex_oid'); + my $pfx; + if ($unit->{git}->{git_dir} =~ m!/([^/]+)/git/([0-9]+\.git)\z!) { + $pfx = "$1 $2"; # v2 + } else { # v1 + ($pfx) = ($unit->{git}->{git_dir} =~ m!/([^/]+)\z!g); + $pfx //= $unit->{git}->{git_dir}; + } + local $self->{current_info} = "$pfx "; + local $sync->{latest_cmt} = \(my $latest_cmt); + local $sync->{unit} = $unit; + while (my ($f, $at, $ct, $oid, $cmt) = $stk->pop_rec) { + if ($sync->{quit}) { + warn "waiting to quit...\n"; + $all->async_wait_all; + $self->update_last_commit($sync); + return; + } + my $req = { + %$sync, + autime => $at, + cotime => $ct, + oid => $oid, + cur_cmt => $cmt + }; if ($f eq 'm') { - my $arg = { %$sync, autime => $at, cotime => $ct }; if ($sync->{max_size}) { - $all->check_async($oid, \&check_size, $arg); + $all->check_async($oid, \&check_size, $req); } else { - $all->cat_async($oid, \&index_oid, $arg); + $all->cat_async($oid, $index_oid, $req); } } elsif ($f eq 'd') { - $all->cat_async($oid, \&unindex_oid, $sync); + $all->cat_async($oid, $unindex_oid, $req); } if (${$sync->{need_checkpoint}}) { reindex_checkpoint($self, $sync); } } $all->async_wait_all; - $self->update_last_commit($git, $i, $stk->{latest_cmt}); + $self->update_last_commit($sync, $stk); } sub xapian_only { my ($self, $opt, $sync, $art_beg) = @_; - my $seq = $opt->{sequential_shard}; + my $seq = $opt->{'sequential-shard'}; $art_beg //= 0; local $self->{parallel} = 0 if $seq; $self->idx_init($opt); # acquire lock @@ -1253,7 +1235,7 @@ sub xapian_only { $sync //= { need_checkpoint => \(my $bool = 0), -opt => $opt, - v2w => $self, + self => $self, nr => \(my $nr = 0), -regen_fmt => "%u/?\n", }; @@ -1261,6 +1243,7 @@ sub xapian_only { if ($seq || !$self->{parallel}) { my $shard_end = $self->{shards} - 1; for my $i (0..$shard_end) { + last if $sync->{quit}; index_xap_step($self, $sync, $art_beg + $i); if ($i != $shard_end) { reindex_checkpoint($self, $sync); @@ -1270,7 +1253,8 @@ sub xapian_only { index_xap_step($self, $sync, $art_beg, 1); } } - $self->git->cat_async_wait; + $self->git->async_wait_all; + $self->{ibx}->cleanup; $self->done; } @@ -1280,28 +1264,41 @@ sub index_sync { $opt //= {}; return xapian_only($self, $opt) if $opt->{xapian_only}; - my $pr = $opt->{-progress}; my $epoch_max; - my $latest = git_dir_latest($self, \$epoch_max); - return unless defined $latest; + my $latest = $self->{ibx}->git_dir_latest(\$epoch_max) // return; + if ($opt->{'fast-noop'}) { # nanosecond (st_ctim) comparison + use Time::HiRes qw(stat); + if (my @mm = stat("$self->{ibx}->{inboxdir}/msgmap.sqlite3")) { + my $c = $mm[10]; # 10 = ctime (nsec NV) + my @hd = stat("$latest/refs/heads"); + my @pr = stat("$latest/packed-refs"); + return if $c > ($hd[10] // 0) && $c > ($pr[10] // 0); + } + } - my $seq = $opt->{sequential_shard}; + my $pr = $opt->{-progress}; + my $seq = $opt->{'sequential-shard'}; my $art_beg; # 
the NNTP article number we start xapian_only at my $idxlevel = $self->{ibx}->{indexlevel}; local $self->{ibx}->{indexlevel} = 'basic' if $seq; $self->idx_init($opt); # acquire lock - fill_alternates($self, $epoch_max); + $self->{mg}->fill_alternates; $self->{oidx}->rethread_prepare($opt); my $sync = { need_checkpoint => \(my $bool = 0), - unindex_range => {}, # EPOCH => oid_old..oid_new reindex => $opt->{reindex}, -opt => $opt, - v2w => $self, + self => $self, + ibx => $self->{ibx}, + epoch_max => $epoch_max, }; - $sync->{ranges} = sync_ranges($self, $sync, $epoch_max); - if (sync_prepare($self, $sync, $epoch_max)) { + my $quit = PublicInbox::SearchIdx::quit_cb($sync); + local $SIG{QUIT} = $quit; + local $SIG{INT} = $quit; + local $SIG{TERM} = $quit; + + if (sync_prepare($self, $sync)) { # tmp_clone seems to fail if inside a transaction, so # we rollback here (because we opened {mm} for reading) # Note: we do NOT rely on DBI transactions for atomicity; @@ -1313,16 +1310,13 @@ sub index_sync { # xapian_only works incrementally w/o --reindex if ($seq && !$opt->{reindex}) { - $art_beg = $sync->{mm_tmp}->max; - $art_beg++ if defined($art_beg); + $art_beg = $sync->{mm_tmp}->max || -1; + $art_beg++; } } - if ($sync->{max_size} = $opt->{max_size}) { - $sync->{index_oid} = \&index_oid; - } # work forwards through history - index_epoch($self, $sync, $_) for (0..$epoch_max); - $self->{oidx}->rethread_done($opt); + index_todo($self, $sync, $_) for @{delete($sync->{todo}) // []}; + $self->{oidx}->rethread_done($opt) unless $sync->{quit}; $self->done; if (my $nr = $sync->{nr}) { @@ -1330,14 +1324,21 @@ sub index_sync { $pr->('all.git '.sprintf($sync->{-regen_fmt}, $$nr)) if $pr; } + my $quit_warn; # deal with Xapian shards sequentially if ($seq && delete($sync->{mm_tmp})) { - $self->{ibx}->{indexlevel} = $idxlevel; - xapian_only($self, $opt, $sync, $art_beg); + if ($sync->{quit}) { + $quit_warn = 1; + } else { + $self->{ibx}->{indexlevel} = $idxlevel; + xapian_only($self, $opt, $sync, $art_beg); + $quit_warn = 1 if $sync->{quit}; + } } # --reindex on the command-line - if ($opt->{reindex} && !ref($opt->{reindex}) && $idxlevel ne 'basic') { + if (!$sync->{quit} && $opt->{reindex} && + !ref($opt->{reindex}) && $idxlevel ne 'basic') { $self->lock_acquire; my $s0 = PublicInbox::SearchIdx->new($self->{ibx}, 0, 0); if (my $xdb = $s0->idx_acquire) { @@ -1349,12 +1350,27 @@ sub index_sync { } # reindex does not pick up new changes, so we rerun w/o it: - if ($opt->{reindex}) { + if ($opt->{reindex} && !$sync->{quit} && + !grep(defined, @$opt{qw(since until)})) { my %again = %$opt; $sync = undef; delete @again{qw(rethread reindex -skip_lock)}; index_sync($self, \%again); + $opt->{quit} = $again{quit}; # propagate to caller + } + warn <{lei}) { + $lei->_lei_atfork_child; + my $pkt_op_p = delete $lei->{pkt_op_p}; + close($pkt_op_p->{op_p}); } + $self->SUPER::ipc_atfork_child; } 1;
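
Note on the checkpoint change above: the old pipe-based barrier_init/barrier_wait pair is dropped, and checkpoint() now queues ipc_do('commit_txn_lazy') on every shard and then does an ipc_do('echo', $i) round-trip per shard. The echo reply can only arrive after the previously queued commit, because each shard worker services requests strictly in order, so the round-trip doubles as the barrier (and the dedicated F_SETPIPE_SZ notification pipe removed from parallel_init becomes unnecessary). The standalone sketch below illustrates that ordering argument only; start_worker(), the line-based "commit"/"echo" protocol and the socketpair transport are invented for illustration and are not the actual PublicInbox::IPC / SearchIdxShard API.

#!/usr/bin/env perl
# Sketch only: request-ordering barrier, in the spirit of checkpoint()
# queueing commit_txn_lazy and then waiting on an echo round-trip.
use strict;
use warnings;
use v5.10.1;
use IO::Handle; # ->autoflush
use Socket qw(AF_UNIX SOCK_STREAM PF_UNSPEC);

sub start_worker { # one forked "shard" worker per call (illustrative)
	socketpair(my $parent, my $child, AF_UNIX, SOCK_STREAM, PF_UNSPEC)
		or die "socketpair: $!";
	my $pid = fork // die "fork: $!";
	if ($pid == 0) { # worker: handles requests strictly in order
		close $parent;
		$child->autoflush(1);
		while (defined(my $req = <$child>)) {
			chomp $req;
			if ($req =~ /\Acommit (\d+)\z/) {
				# stand-in for a slow commit_txn_lazy
				select(undef, undef, undef, 0.1 * $1);
			} elsif ($req =~ /\Aecho (.+)\z/) {
				# reply is only reached after every
				# earlier request has completed
				print $child "$1\n";
			}
		}
		exit 0;
	}
	close $child;
	$parent->autoflush(1);
	{ pid => $pid, sock => $parent };
}

my @shards = map { start_worker() } (0..3);

# start commits asynchronously on all shards (no reply expected)
print { $_->{sock} } "commit 1\n" for @shards;

# barrier: one echo round-trip per shard proves its commit finished
my $i = 0;
for my $shard (@shards) {
	print { $shard->{sock} } "echo $i\n";
	chomp(my $reply = readline($shard->{sock}));
	$reply == $i or die "shard[$i] bad echo:$reply != $i";
	++$i;
}
say 'all shards committed';
close $_->{sock} for @shards; # workers see EOF and exit
waitpid($_->{pid}, 0) for @shards;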