diff --git a/lib/PublicInbox/V2Writable.pm b/lib/PublicInbox/V2Writable.pm
index 6b011712..6a88f62a 100644
--- a/lib/PublicInbox/V2Writable.pm
+++ b/lib/PublicInbox/V2Writable.pm
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 all contributors
+# Copyright (C) 2018-2019 all contributors
# License: AGPL-3.0+
# This interface wraps and mimics PublicInbox::Import
@@ -7,11 +7,11 @@ package PublicInbox::V2Writable;
use strict;
use warnings;
use base qw(PublicInbox::Lock);
-use PublicInbox::SearchIdxPart;
+use PublicInbox::SearchIdxShard;
use PublicInbox::MIME;
use PublicInbox::Git;
use PublicInbox::Import;
-use PublicInbox::MID qw(mids);
+use PublicInbox::MID qw(mids references);
use PublicInbox::ContentId qw(content_id content_digest);
use PublicInbox::Inbox;
use PublicInbox::OverIdx;
@@ -23,8 +23,15 @@ use IO::Handle;
# an estimate of the post-packed size to the raw uncompressed size
my $PACKING_FACTOR = 0.4;
-# assume 2 cores if GNU nproc(1) is not available
-sub nproc_parts ($) {
+# SATA storage lags behind what CPUs are capable of, so relying on
+# nproc(1) can be misleading, and having extra Xapian shards is a
+# waste of FDs and space. It can also lead to excessive IO latency
+# and slow things down. Users on NVMe or other fast storage can
+# use the NPROC env or the switches in our script/public-inbox-* programs
+# to increase the number of Xapian shards.
+our $NPROC_MAX_DEFAULT = 4;
+
+sub nproc_shards ($) {
my ($creat_opt) = @_;
if (ref($creat_opt) eq 'HASH') {
if (defined(my $n = $creat_opt->{nproc})) {
@@ -32,37 +39,44 @@ sub nproc_parts ($) {
}
}
- my $n = int($ENV{NPROC} || `nproc 2>/dev/null` || 2);
+ my $n = $ENV{NPROC};
+ if (!$n) {
+ chomp($n = `nproc 2>/dev/null`);
+ # assume 2 cores if GNU nproc(1) is not available
+ $n = 2 if !$n;
+ $n = $NPROC_MAX_DEFAULT if $n > $NPROC_MAX_DEFAULT;
+ }
+
# subtract for the main process and git-fast-import
$n -= 1;
$n < 1 ? 1 : $n;
}
-sub count_partitions ($) {
+sub count_shards ($) {
my ($self) = @_;
- my $nparts = 0;
+ my $n = 0;
my $xpfx = $self->{xpfx};
- # always load existing partitions in case core count changes:
- # Also, partition count may change while -watch is running
- # due to -compact
+ # always load existing shards in case core count changes:
+ # Also, shard count may change while -watch is running
+ # due to "xcpdb --reshard"
if (-d $xpfx) {
- foreach my $part (<$xpfx/*>) {
- -d $part && $part =~ m!/\d+\z! or next;
+ foreach my $shard (<$xpfx/*>) {
+ -d $shard && $shard =~ m!/[0-9]+\z! or next;
eval {
- Search::Xapian::Database->new($part)->close;
- $nparts++;
+ Search::Xapian::Database->new($shard)->close;
+ $n++;
};
}
}
- $nparts;
+ $n;
}
sub new {
# $creat may be any true value, or 0/undef. A hashref is true,
# and $creat->{nproc} may be set to an integer
my ($class, $v2ibx, $creat) = @_;
- my $dir = $v2ibx->{mainrepo} or die "no mainrepo in inbox\n";
+ my $dir = $v2ibx->{inboxdir} or die "no inboxdir in inbox\n";
unless (-d $dir) {
if ($creat) {
require File::Path;
@@ -89,7 +103,7 @@ sub new {
rotate_bytes => int((1024 * 1024 * 1024) / $PACKING_FACTOR),
last_commit => [], # git repo -> commit
};
- $self->{partitions} = count_partitions($self) || nproc_parts($creat);
+ $self->{shards} = count_shards($self) || nproc_shards($creat);
bless $self, $class;
}
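A minimal sketch (not part of the patch) of how the new default shard count falls out of nproc_shards() above, assuming GNU nproc(1) reports 8 cores and the NPROC environment variable is unset:

	my $n = 8;                        # `nproc` reported 8 cores
	$n = 4 if $n > 4;                 # capped at $NPROC_MAX_DEFAULT
	$n -= 1;                          # reserve the main process + git-fast-import
	my $shards = $n < 1 ? 1 : $n;     # => 3 Xapian shards
	# an explicit NPROC=8 bypasses the cap and yields 7 shards instead
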
@@ -116,6 +130,16 @@ sub add {
});
}
+# indexes a message, returns true if checkpointing is needed
+sub do_idx ($$$$$$$) {
+ my ($self, $msgref, $mime, $len, $num, $oid, $mid0) = @_;
+ $self->{over}->add_overview($mime, $len, $num, $oid, $mid0);
+ my $idx = idx_shard($self, $num % $self->{shards});
+ $idx->index_raw($len, $msgref, $num, $oid, $mid0, $mime);
+ my $n = $self->{transact_bytes} += $len;
+ $n >= (PublicInbox::SearchIdx::BATCH_BYTES * $self->{shards});
+}
+
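A caller sketch (not part of the patch) of the do_idx() contract above, which centralizes the indexing step that _add() and reindex_oid() previously duplicated; the variables are assumed to be set up as in _add() below:

	if (do_idx($self, $msgref, $mime, $len, $num, $oid, $mid0)) {
		# accumulated transact_bytes crossed BATCH_BYTES * shards
		$self->checkpoint;
	}
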
sub _add {
my ($self, $mime, $check_cb) = @_;
@@ -141,13 +165,7 @@ sub _add {
$self->{last_commit}->[$self->{epoch_max}] = $cmt;
my ($oid, $len, $msgref) = @{$im->{last_object}};
- $self->{over}->add_overview($mime, $len, $num, $oid, $mid0);
- my $nparts = $self->{partitions};
- my $part = $num % $nparts;
- my $idx = $self->idx_part($part);
- $idx->index_raw($len, $msgref, $num, $oid, $mid0, $mime);
- my $n = $self->{transact_bytes} += $len;
- if ($n > (PublicInbox::SearchIdx::BATCH_BYTES * $nparts)) {
+ if (do_idx($self, $msgref, $mime, $len, $num, $oid, $mid0)) {
$self->checkpoint;
}
@@ -232,15 +250,15 @@ sub num_for_harder {
$num;
}
-sub idx_part {
- my ($self, $part) = @_;
- $self->{idx_parts}->[$part];
+sub idx_shard {
+ my ($self, $shard_i) = @_;
+ $self->{idx_shards}->[$shard_i];
}
# idempotent
sub idx_init {
my ($self, $opt) = @_;
- return if $self->{idx_parts};
+ return if $self->{idx_shards};
my $ibx = $self->{-inbox};
# do not leak read-only FDs to child processes, we only have these
@@ -268,49 +286,53 @@ sub idx_init {
$self->lock_acquire unless ($opt && $opt->{-skip_lock});
$over->create;
- # -compact can change partition count while -watch is idle
- my $nparts = count_partitions($self);
- if ($nparts && $nparts != $self->{partitions}) {
- $self->{partitions} = $nparts;
+ # xcpdb can change shard count while -watch is idle
+ my $nshards = count_shards($self);
+ if ($nshards && $nshards != $self->{shards}) {
+ $self->{shards} = $nshards;
}
- # need to create all parts before initializing msgmap FD
- my $max = $self->{partitions} - 1;
+ # need to create all shards before initializing msgmap FD
+ my $max = $self->{shards} - 1;
- # idx_parts must be visible to all forked processes
- my $idx = $self->{idx_parts} = [];
+ # idx_shards must be visible to all forked processes
+ my $idx = $self->{idx_shards} = [];
for my $i (0..$max) {
- push @$idx, PublicInbox::SearchIdxPart->new($self, $i);
+ push @$idx, PublicInbox::SearchIdxShard->new($self, $i);
}
# Now that all subprocesses are up, we can open the FDs
# for SQLite:
my $mm = $self->{mm} = PublicInbox::Msgmap->new_file(
- "$self->{-inbox}->{mainrepo}/msgmap.sqlite3", 1);
+ "$self->{-inbox}->{inboxdir}/msgmap.sqlite3", 1);
$mm->{dbh}->begin_work;
});
}
-sub purge_oids ($$) {
- my ($self, $purge) = @_; # $purge = { $object_id => 1, ... }
+# returns an array mapping [ epoch => latest_commit ]
+# latest_commit may be undef if nothing was done to that epoch
+# $replace_map = { $object_id => $strref, ... }
+sub _replace_oids ($$$) {
+ my ($self, $mime, $replace_map) = @_;
$self->done;
- my $pfx = "$self->{-inbox}->{mainrepo}/git";
- my $purges = [];
+ my $pfx = "$self->{-inbox}->{inboxdir}/git";
+ my $rewrites = []; # epoch => commit
my $max = $self->{epoch_max};
unless (defined($max)) {
defined(my $latest = git_dir_latest($self, \$max)) or return;
$self->{epoch_max} = $max;
}
+
foreach my $i (0..$max) {
my $git_dir = "$pfx/$i.git";
-d $git_dir or next;
my $git = PublicInbox::Git->new($git_dir);
my $im = $self->import_init($git, 0, 1);
- $purges->[$i] = $im->purge_oids($purge);
+ $rewrites->[$i] = $im->replace_oids($mime, $replace_map);
$im->done;
}
- $purges;
+ $rewrites;
}
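The $replace_map consumed by _replace_oids() above has a simple shape; a hypothetical example (OIDs shortened here for readability):

	my $replace_map = {
		# blob OID of the message being rewritten => ref to replacement text
		'f00df00d...' => \$new_raw,   # ->replace
		'deadbeef...' => \'',         # ->purge replaces with the empty string
	};
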
sub content_ids ($) {
@@ -333,25 +355,30 @@ sub content_matches ($$) {
0
}
-sub remove_internal ($$$$) {
- my ($self, $mime, $cmt_msg, $purge) = @_;
+# used for removing or replacing (purging)
+sub rewrite_internal ($$;$$$) {
+ my ($self, $old_mime, $cmt_msg, $new_mime, $sref) = @_;
$self->idx_init;
- my $im = $self->importer unless $purge;
+ my ($im, $need_reindex, $replace_map);
+ if ($sref) {
+ $replace_map = {}; # oid => sref
+ $need_reindex = [] if $new_mime;
+ } else {
+ $im = $self->importer;
+ }
my $over = $self->{over};
- my $cids = content_ids($mime);
- my $parts = $self->{idx_parts};
- my $mm = $self->{mm};
+ my $cids = content_ids($old_mime);
my $removed;
- my $mids = mids($mime->header_obj);
+ my $mids = mids($old_mime->header_obj);
# We avoid introducing new blobs into git since the raw content
# can be slightly different, so we do not need the user-supplied
# message now that we have the mids and content_id
- $mime = undef;
+ $old_mime = undef;
my $mark;
foreach my $mid (@$mids) {
- my %gone;
+ my %gone; # num => [ smsg, raw ]
my ($id, $prev);
while (my $smsg = $over->next_by_mid($mid, \$id, \$prev)) {
my $msg = get_blob($self, $smsg);
@@ -374,17 +401,21 @@ sub remove_internal ($$$$) {
}
foreach my $num (keys %gone) {
my ($smsg, $orig) = @{$gone{$num}};
- $mm->num_delete($num);
# $removed should only be set once assuming
# no bugs in our deduplication code:
$removed = $smsg;
my $oid = $smsg->{blob};
- if ($purge) {
- $purge->{$oid} = 1;
+ if ($replace_map) {
+ $replace_map->{$oid} = $sref;
} else {
($mark, undef) = $im->remove($orig, $cmt_msg);
}
$orig = undef;
+ if ($need_reindex) { # ->replace
+ push @$need_reindex, $smsg;
+ } else { # ->purge or ->remove
+ $self->{mm}->num_delete($num);
+ }
unindex_oid_remote($self, $oid, $mid);
}
}
@@ -393,8 +424,9 @@ sub remove_internal ($$$$) {
my $cmt = $im->get_mark($mark);
$self->{last_commit}->[$self->{epoch_max}] = $cmt;
}
- if ($purge && scalar keys %$purge) {
- return purge_oids($self, $purge);
+ if ($replace_map && scalar keys %$replace_map) {
+ my $rewrites = _replace_oids($self, $new_mime, $replace_map);
+ return { rewrites => $rewrites, need_reindex => $need_reindex };
}
$removed;
}
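A sketch (not part of the patch) of the two return shapes of rewrite_internal() above: the removed $smsg for a plain ->remove, or a hashref once blobs were replaced or purged:

	# ->remove path: the $smsg that was removed (false if nothing matched)
	my $removed = rewrite_internal($self, $mime, $cmt_msg);

	# ->replace / ->purge path:
	# { rewrites => [ epoch => commit ], need_reindex => [ $smsg, ... ] }
	my $res = rewrite_internal($self, $old_mime, undef, $new_mime, \$raw);
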
@@ -403,25 +435,128 @@ sub remove_internal ($$$$) {
sub remove {
my ($self, $mime, $cmt_msg) = @_;
$self->{-inbox}->with_umask(sub {
- remove_internal($self, $mime, $cmt_msg, undef);
+ rewrite_internal($self, $mime, $cmt_msg);
});
}
+sub _replace ($$;$$) {
+ my ($self, $old_mime, $new_mime, $sref) = @_;
+ my $rewritten = $self->{-inbox}->with_umask(sub {
+ rewrite_internal($self, $old_mime, undef, $new_mime, $sref);
+ }) or return;
+
+ my $rewrites = $rewritten->{rewrites};
+ # ->done is called if there are rewrites since we gc+prune from git
+ $self->idx_init if @$rewrites;
+
+ for my $i (0..$#$rewrites) {
+ defined(my $cmt = $rewrites->[$i]) or next;
+ $self->{last_commit}->[$i] = $cmt;
+ }
+ $rewritten;
+}
+
# public
sub purge {
my ($self, $mime) = @_;
- my $purges = $self->{-inbox}->with_umask(sub {
- remove_internal($self, $mime, undef, {});
- }) or return;
- $self->idx_init if @$purges; # ->done is called on purges
- for my $i (0..$#$purges) {
- defined(my $cmt = $purges->[$i]) or next;
- $self->{last_commit}->[$i] = $cmt;
+ my $rewritten = _replace($self, $mime, undef, \'') or return;
+ $rewritten->{rewrites}
+}
+
+# returns the git object_id of $$raw; does not write the object to the FS
+sub git_hash_raw ($$) {
+ my ($self, $raw) = @_;
+ # grab the expected OID we have to reindex:
+ open my $tmp_fh, '+>', undef or die "failed to open tmp: $!";
+ $tmp_fh->autoflush(1);
+ print $tmp_fh $$raw or die "print \$tmp_fh: $!";
+ sysseek($tmp_fh, 0, 0) or die "seek failed: $!";
+
+ my ($r, $w);
+ pipe($r, $w) or die "failed to create pipe: $!";
+ my $rdr = { 0 => fileno($tmp_fh), 1 => fileno($w) };
+ my $git_dir = $self->{-inbox}->git->{git_dir};
+ my $cmd = ['git', "--git-dir=$git_dir", qw(hash-object --stdin)];
+ my $pid = spawn($cmd, undef, $rdr);
+ close $w;
+ local $/ = "\n";
+ chomp(my $oid = <$r>);
+ waitpid($pid, 0) == $pid or die "git hash-object did not finish";
+ die "git hash-object failed: $?" if $?;
+ $oid =~ /\A[a-f0-9]{40}\z/ or die "OID not expected: $oid";
+ $oid;
+}
+
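A usage sketch (not part of the patch): git_hash_raw() computes the same blob OID that `git hash-object --stdin` prints, without writing the object to the store, so ->replace can learn the OID it will need to reindex before anything is committed:

	my $raw = $new_mime->as_string;
	my $expect_oid = git_hash_raw($self, \$raw);   # 40-hex blob OID
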
+sub _check_mids_match ($$$) {
+ my ($old_list, $new_list, $hdrs) = @_;
+ my %old_mids = map { $_ => 1 } @$old_list;
+ my %new_mids = map { $_ => 1 } @$new_list;
+ my @old = keys %old_mids;
+ my @new = keys %new_mids;
+ my $err = "$hdrs may not be changed when replacing\n";
+ die $err if scalar(@old) != scalar(@new);
+ delete @new_mids{@old};
+ delete @old_mids{@new};
+ die $err if (scalar(keys %old_mids) || scalar(keys %new_mids));
+}
+
+# Changing Message-IDs or References with ->replace isn't supported.
+# The rules for dealing with messages with multiple or conflicting
+# Message-IDs are pretty complex and rethreading hasn't been fully
+# implemented, yet.
+sub check_mids_match ($$) {
+ my ($old_mime, $new_mime) = @_;
+ my $old = $old_mime->header_obj;
+ my $new = $new_mime->header_obj;
+ _check_mids_match(mids($old), mids($new), 'Message-ID(s)');
+ _check_mids_match(references($old), references($new),
+ 'References/In-Reply-To');
+}
+
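Hypothetical examples (not part of the patch) of the guard above; both calls die with "Message-ID(s) may not be changed when replacing":

	_check_mids_match(['<a@example>'], ['<b@example>'], 'Message-ID(s)');
	_check_mids_match(['<a@example>'], ['<a@example>', '<b@example>'],
			'Message-ID(s)');
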
+# public
+sub replace ($$$) {
+ my ($self, $old_mime, $new_mime) = @_;
+
+ check_mids_match($old_mime, $new_mime);
+
+ # mutt will always add Content-Length:, Status:, Lines: when editing
+ PublicInbox::Import::drop_unwanted_headers($new_mime);
+
+ my $raw = $new_mime->as_string;
+ my $expect_oid = git_hash_raw($self, \$raw);
+ my $rewritten = _replace($self, $old_mime, $new_mime, \$raw) or return;
+ my $need_reindex = $rewritten->{need_reindex};
+
+ # just in case we have bugs in deduplication code:
+ my $n = scalar(@$need_reindex);
+ if ($n > 1) {
+ my $list = join(', ', map {
+ "$_->{num}: <$_->{mid}>"
+ } @$need_reindex);
+ warn <<"";
+W: rewritten $n messages matching content of original message (expected: 1).
+W: possible bug in public-inbox, NNTP article IDs and Message-IDs follow:
+W: $list
+
}
- $purges;
+
+ # make sure we really got the OID:
+ my ($oid, $type, $len) = $self->{-inbox}->git->check($expect_oid);
+ $oid eq $expect_oid or die "BUG: $expect_oid not found after replace";
+
+ # don't leak FDs to Xapian:
+ $self->{-inbox}->git->cleanup;
+
+ # reindex modified messages:
+ for my $smsg (@$need_reindex) {
+ my $num = $smsg->{num};
+ my $mid0 = $smsg->{mid};
+ do_idx($self, \$raw, $new_mime, $len, $num, $oid, $mid0);
+ }
+ $rewritten->{rewrites};
}
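A minimal usage sketch (not part of the patch; $ibx, $old_mime, $new_mime and $spam_mime are assumed) of the public ->replace and ->purge entry points; both return the per-epoch rewrite commits:

	my $v2w = PublicInbox::V2Writable->new($ibx);
	my $commits = $v2w->replace($old_mime, $new_mime); # arrayref: epoch => commit
	my $purged = $v2w->purge($spam_mime);              # same shape, content removed
	$v2w->done;
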
-sub last_commit_part ($$;$) {
+sub last_epoch_commit ($$;$) {
my ($self, $i, $cmt) = @_;
my $v = PublicInbox::Search::SCHEMA_VERSION();
$self->{mm}->last_commit_xap($v, $i, $cmt);
@@ -434,7 +569,7 @@ sub set_last_commits ($) {
foreach my $i (0..$epoch_max) {
defined(my $cmt = $last_commit->[$i]) or next;
$last_commit->[$i] = undef;
- last_commit_part($self, $i, $cmt);
+ last_epoch_commit($self, $i, $cmt);
}
}
@@ -452,7 +587,7 @@ sub barrier_wait {
while (scalar keys %$barrier) {
defined(my $l = $r->getline) or die "EOF on barrier_wait: $!";
$l =~ /\Abarrier (\d+)/ or die "bad line on barrier_wait: $l";
- delete $barrier->{$1} or die "bad part[$1] on barrier wait";
+ delete $barrier->{$1} or die "bad shard[$1] on barrier wait";
}
}
@@ -467,8 +602,8 @@ sub checkpoint ($;$) {
$im->checkpoint;
}
}
- my $parts = $self->{idx_parts};
- if ($parts) {
+ my $shards = $self->{idx_shards};
+ if ($shards) {
my $dbh = $self->{mm}->{dbh};
# SQLite msgmap data is second in importance
@@ -479,19 +614,19 @@ sub checkpoint ($;$) {
# Now deal with Xapian
if ($wait) {
- my $barrier = $self->barrier_init(scalar @$parts);
+ my $barrier = $self->barrier_init(scalar @$shards);
- # each partition needs to issue a barrier command
- $_->remote_barrier for @$parts;
+ # each shard needs to issue a barrier command
+ $_->remote_barrier for @$shards;
- # wait for each Xapian partition
+ # wait for each Xapian shard
$self->barrier_wait($barrier);
} else {
- $_->remote_commit for @$parts;
+ $_->remote_commit for @$shards;
}
# last_commit is special, don't commit these until
- # remote partitions are done:
+ # remote shards are done:
$dbh->begin_work;
set_last_commits($self);
$dbh->commit;
@@ -514,30 +649,26 @@ sub done {
checkpoint($self);
my $mm = delete $self->{mm};
$mm->{dbh}->commit if $mm;
- my $parts = delete $self->{idx_parts};
- if ($parts) {
- $_->remote_close for @$parts;
+ my $shards = delete $self->{idx_shards};
+ if ($shards) {
+ $_->remote_close for @$shards;
}
$self->{over}->disconnect;
delete $self->{bnote};
$self->{transact_bytes} = 0;
- $self->lock_release if $parts;
+ $self->lock_release if $shards;
$self->{-inbox}->git->cleanup;
}
sub fill_alternates ($$) {
my ($self, $epoch) = @_;
- my $pfx = "$self->{-inbox}->{mainrepo}/git";
- my $all = "$self->{-inbox}->{mainrepo}/all.git";
- my @cmd;
+ my $pfx = "$self->{-inbox}->{inboxdir}/git";
+ my $all = "$self->{-inbox}->{inboxdir}/all.git";
+
unless (-d $all) {
PublicInbox::Import::init_bare($all);
}
- @cmd = (qw/git config/, "--file=$pfx/$epoch.git/config",
- 'include.path', '../../all.git/config');
- PublicInbox::Import::run_die(\@cmd);
-
my $alt = "$all/objects/info/alternates";
my %alts;
my @add;
@@ -559,9 +690,12 @@ sub fill_alternates ($$) {
sub git_init {
my ($self, $epoch) = @_;
- my $git_dir = "$self->{-inbox}->{mainrepo}/git/$epoch.git";
+ my $git_dir = "$self->{-inbox}->{inboxdir}/git/$epoch.git";
my @cmd = (qw(git init --bare -q), $git_dir);
PublicInbox::Import::run_die(\@cmd);
+ @cmd = (qw/git config/, "--file=$git_dir/config",
+ 'include.path', '../../all.git/config');
+ PublicInbox::Import::run_die(\@cmd);
fill_alternates($self, $epoch);
$git_dir
}
@@ -569,12 +703,12 @@ sub git_init {
sub git_dir_latest {
my ($self, $max) = @_;
$$max = -1;
- my $pfx = "$self->{-inbox}->{mainrepo}/git";
+ my $pfx = "$self->{-inbox}->{inboxdir}/git";
return unless -d $pfx;
my $latest;
opendir my $dh, $pfx or die "opendir $pfx: $!\n";
while (defined(my $git_dir = readdir($dh))) {
- $git_dir =~ m!\A(\d+)\.git\z! or next;
+ $git_dir =~ m!\A([0-9]+)\.git\z! or next;
if ($1 > $$max) {
$$max = $1;
$latest = "$pfx/$git_dir";
@@ -689,8 +823,8 @@ sub atfork_child {
my ($self) = @_;
my $fh = delete $self->{reindex_pipe};
close $fh if $fh;
- if (my $parts = $self->{idx_parts}) {
- $_->atfork_child foreach @$parts;
+ if (my $shards = $self->{idx_shards}) {
+ $_->atfork_child foreach @$shards;
}
if (my $im = $self->{im}) {
$im->atfork_child;
@@ -772,15 +906,8 @@ sub reindex_oid ($$$$) {
}
$sync->{mm_tmp}->mid_delete($mid0) or
die "failed to delete <$mid0> for article #$num\n";
-
- $self->{over}->add_overview($mime, $len, $num, $oid, $mid0);
- my $nparts = $self->{partitions};
- my $part = $num % $nparts;
- my $idx = $self->idx_part($part);
- $idx->index_raw($len, $msgref, $num, $oid, $mid0, $mime);
- my $n = $self->{transact_bytes} += $len;
$sync->{nr}++;
- if ($n > (PublicInbox::SearchIdx::BATCH_BYTES * $nparts)) {
+ if (do_idx($self, $msgref, $mime, $len, $num, $oid, $mid0)) {
$git->cleanup;
$sync->{mm_tmp}->atfork_prepare;
$self->done; # release lock
@@ -799,22 +926,22 @@ sub reindex_oid ($$$$) {
# only update last_commit for $i on reindex iff newer than current
sub update_last_commit ($$$$) {
my ($self, $git, $i, $cmt) = @_;
- my $last = last_commit_part($self, $i);
+ my $last = last_epoch_commit($self, $i);
if (defined $last && is_ancestor($git, $last, $cmt)) {
my @cmd = (qw(rev-list --count), "$last..$cmt");
chomp(my $n = $git->qx(@cmd));
return if $n ne '' && $n == 0;
}
- last_commit_part($self, $i, $cmt);
+ last_epoch_commit($self, $i, $cmt);
}
-sub git_dir_n ($$) { "$_[0]->{-inbox}->{mainrepo}/git/$_[1].git" }
+sub git_dir_n ($$) { "$_[0]->{-inbox}->{inboxdir}/git/$_[1].git" }
sub last_commits ($$) {
my ($self, $epoch_max) = @_;
my $heads = [];
for (my $i = $epoch_max; $i >= 0; $i--) {
- $heads->[$i] = last_commit_part($self, $i);
+ $heads->[$i] = last_epoch_commit($self, $i);
}
$heads;
}
@@ -831,6 +958,12 @@ sub log_range ($$$$$) {
return $tip; # all of it
};
+ # fast equality check to avoid (v)fork+execve overhead
+ if ($cur eq $tip) {
+ $sync->{ranges}->[$i] = undef;
+ return;
+ }
+
my $range = "$cur..$tip";
$pr->("$i.git checking contiguity... ") if $pr;
if (is_ancestor($git, $cur, $tip)) { # common case
@@ -861,7 +994,7 @@ Rewritten history? (in $git->{git_dir})
reindexing $git->{git_dir} starting at
$range
- $sync->{"unindex-range.$i"} = "$base..$cur";
+ $sync->{unindex_range}->{$i} = "$base..$cur";
}
$range;
}
@@ -879,7 +1012,7 @@ sub sync_prepare ($$$) {
for (my $i = $epoch_max; $i >= 0; $i--) {
die 'BUG: already indexing!' if $self->{reindex_pipe};
my $git_dir = git_dir_n($self, $i);
- -d $git_dir or next; # missing parts are fine
+ -d $git_dir or next; # missing epochs are fine
my $git = PublicInbox::Git->new($git_dir);
if ($reindex_heads) {
$head = $reindex_heads->[$i] or next;
@@ -900,6 +1033,9 @@ sub sync_prepare ($$$) {
$pr->("$n\n") if $pr;
$regen_max += $n;
}
+
+ return 0 if (!$regen_max && !keys(%{$self->{unindex_range}}));
+
# reindex should NOT see new commits anymore, if we do,
# it's a problem and we need to notice it via die()
my $pad = length($regen_max) + 1;
@@ -911,7 +1047,7 @@ sub sync_prepare ($$$) {
sub unindex_oid_remote ($$$) {
my ($self, $oid, $mid) = @_;
- $_->remote_remove($oid, $mid) foreach @{$self->{idx_parts}};
+ $_->remote_remove($oid, $mid) foreach @{$self->{idx_shards}};
$self->{over}->remove_oid($oid, $mid);
}
@@ -981,6 +1117,42 @@ sub sync_ranges ($$$) {
$ranges;
}
+sub index_epoch ($$$) {
+ my ($self, $sync, $i) = @_;
+
+ my $git_dir = git_dir_n($self, $i);
+ die 'BUG: already reindexing!' if $self->{reindex_pipe};
+ -d $git_dir or return; # missing epochs are fine
+ fill_alternates($self, $i);
+ my $git = PublicInbox::Git->new($git_dir);
+ if (my $unindex_range = delete $sync->{unindex_range}->{$i}) {
+ unindex($self, $sync, $git, $unindex_range);
+ }
+ defined(my $range = $sync->{ranges}->[$i]) or return;
+ if (my $pr = $sync->{-opt}->{-progress}) {
+ $pr->("$i.git indexing $range\n");
+ }
+
+ my @cmd = qw(log --raw -r --pretty=tformat:%H
+ --no-notes --no-color --no-abbrev --no-renames);
+ my $fh = $self->{reindex_pipe} = $git->popen(@cmd, $range);
+ my $cmt;
+ while (<$fh>) {
+ chomp;
+ $self->{current_info} = "$i.git $_";
+ if (/\A$x40$/o && !defined($cmt)) {
+ $cmt = $_;
+ } elsif (/\A:\d{6} 100644 $x40 ($x40) [AM]\tm$/o) {
+ reindex_oid($self, $sync, $git, $1);
+ } elsif (/\A:\d{6} 100644 $x40 ($x40) [AM]\td$/o) {
+ mark_deleted($self, $sync, $git, $1);
+ }
+ }
+ $fh = undef;
+ delete $self->{reindex_pipe};
+ update_last_commit($self, $git, $i, $cmt) if defined $cmt;
+}
+
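For reference (not part of the patch), the `git log --raw` output consumed by index_epoch() above looks roughly like the annotated sample below, with OIDs shortened; the real regexes require full 40-hex OIDs ($x40), and a tab separates the status letter from the path "m" (message blob) or "d" (deletion marker):

	3c39f9c942a6975245fda878e9b957d8d3367662     # commit, saved in $cmt
	:000000 100644 0000... f00d... A	m    # message blob => reindex_oid()
	:000000 100644 0000... dead... A	d    # deletion marker => mark_deleted()
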
# public, called by public-inbox-index
sub index_sync {
my ($self, $opt) = @_;
@@ -991,44 +1163,27 @@ sub index_sync {
return unless defined $latest;
$self->idx_init($opt); # acquire lock
my $sync = {
- mm_tmp => $self->{mm}->tmp_clone,
D => {}, # "$mid\0$cid" => $oid
+ unindex_range => {}, # EPOCH => oid_old..oid_new
reindex => $opt->{reindex},
-opt => $opt
};
$sync->{ranges} = sync_ranges($self, $sync, $epoch_max);
$sync->{regen} = sync_prepare($self, $sync, $epoch_max);
- my @cmd = qw(log --raw -r --pretty=tformat:%H
- --no-notes --no-color --no-abbrev --no-renames);
+ if ($sync->{regen}) {
+ # tmp_clone seems to fail if inside a transaction, so
+ # we rollback here (because we opened {mm} for reading)
+ # Note: we do NOT rely on DBI transactions for atomicity;
+ # only for batch performance.
+ $self->{mm}->{dbh}->rollback;
+ $self->{mm}->{dbh}->begin_work;
+ $sync->{mm_tmp} = $self->{mm}->tmp_clone;
+ }
# work backwards through history
for (my $i = $epoch_max; $i >= 0; $i--) {
- my $git_dir = git_dir_n($self, $i);
- die 'BUG: already reindexing!' if $self->{reindex_pipe};
- -d $git_dir or next; # missing parts are fine
- fill_alternates($self, $i);
- my $git = PublicInbox::Git->new($git_dir);
- my $unindex_range = delete $sync->{"unindex-range.$i"};
- unindex($self, $sync, $git, $unindex_range) if $unindex_range;
- defined(my $range = $sync->{ranges}->[$i]) or next;
- $pr->("$i.git indexing $range\n") if $pr;
- my $fh = $self->{reindex_pipe} = $git->popen(@cmd, $range);
- my $cmt;
- while (<$fh>) {
- chomp;
- $self->{current_info} = "$i.git $_";
- if (/\A$x40$/o && !defined($cmt)) {
- $cmt = $_;
- } elsif (/\A:\d{6} 100644 $x40 ($x40) [AM]\tm$/o) {
- reindex_oid($self, $sync, $git, $1);
- } elsif (/\A:\d{6} 100644 $x40 ($x40) [AM]\td$/o) {
- mark_deleted($self, $sync, $git, $1);
- }
- }
- $fh = undef;
- delete $self->{reindex_pipe};
- update_last_commit($self, $git, $i, $cmt) if defined $cmt;
+ index_epoch($self, $sync, $i);
}
# unindex is required for leftovers if "deletes" affect messages
@@ -1039,8 +1194,10 @@ sub index_sync {
$git->cleanup;
}
$self->done;
- if (my $pr = $sync->{-opt}->{-progress}) {
- $pr->('all.git '.sprintf($sync->{-regen_fmt}, $sync->{nr}));
+
+ if (my $nr = $sync->{nr}) {
+ my $pr = $sync->{-opt}->{-progress};
+ $pr->('all.git '.sprintf($sync->{-regen_fmt}, $nr)) if $pr;
}
# reindex does not pick up new changes, so we rerun w/o it: