# an estimate of the post-packed size to the raw uncompressed size
my $PACKING_FACTOR = 0.4;
-# assume 2 cores if GNU nproc(1) is not available
+# SATA storage lags behind what CPUs are capable of, so relying on
+# nproc(1) can be misleading and having extra Xapian partitions is a
+# waste of FDs and space. It can also lead to excessive IO latency
+# and slow things down. Users on NVME or other fast storage can
+# use the NPROC env or switches in our script/public-inbox-* programs
+# to increase Xapian partitions.
+our $NPROC_MAX_DEFAULT = 4;
+
sub nproc_parts ($) {
my ($creat_opt) = @_;
if (ref($creat_opt) eq 'HASH') {
}
}
-	my $n = int($ENV{NPROC} || `nproc 2>/dev/null` || 2);
+	my $n = $ENV{NPROC};
+	if (!$n) {
+		chomp($n = `nproc 2>/dev/null`);
+		# assume 2 cores if GNU nproc(1) is not available
+		$n = 2 if !$n;
+		# cap only the auto-detected count (an explicit NPROC env
+		# bypasses the cap above); comparing $n against the constant
+		# is required here -- comparing the constant to 4 would be
+		# 4 > 4, which is always false, so the cap would never apply
+		$n = $NPROC_MAX_DEFAULT if $n > $NPROC_MAX_DEFAULT;
+	}
+
# subtract for the main process and git-fast-import
$n -= 1;
$n < 1 ? 1 : $n;
# due to -compact
if (-d $xpfx) {
foreach my $part (<$xpfx/*>) {
- -d $part && $part =~ m!/\d+\z! or next;
+ -d $part && $part =~ m!/[0-9]+\z! or next;
eval {
Search::Xapian::Database->new($part)->close;
$nparts++;
my $latest;
opendir my $dh, $pfx or die "opendir $pfx: $!\n";
while (defined(my $git_dir = readdir($dh))) {
- $git_dir =~ m!\A(\d+)\.git\z! or next;
+ $git_dir =~ m!\A([0-9]+)\.git\z! or next;
if ($1 > $$max) {
$$max = $1;
$latest = "$pfx/$git_dir";
return $tip; # all of it
};
+ # fast equality check to avoid (v)fork+execve overhead
+ if ($cur eq $tip) {
+ $sync->{ranges}->[$i] = undef;
+ return;
+ }
+
my $range = "$cur..$tip";
$pr->("$i.git checking contiguity... ") if $pr;
if (is_ancestor($git, $cur, $tip)) { # common case
$pr->("$n\n") if $pr;
$regen_max += $n;
}
+
+ return 0 if (!$regen_max && !keys(%{$self->{unindex_range}}));
+
# reindex should NOT see new commits anymore, if we do,
# it's a problem and we need to notice it via die()
my $pad = length($regen_max) + 1;
return unless defined $latest;
$self->idx_init($opt); # acquire lock
my $sync = {
- mm_tmp => $self->{mm}->tmp_clone,
D => {}, # "$mid\0$cid" => $oid
unindex_range => {}, # EPOCH => oid_old..oid_new
reindex => $opt->{reindex},
$sync->{ranges} = sync_ranges($self, $sync, $epoch_max);
$sync->{regen} = sync_prepare($self, $sync, $epoch_max);
+ if ($sync->{regen}) {
+ # tmp_clone seems to fail if inside a transaction, so
+ # we rollback here (because we opened {mm} for reading)
+ # Note: we do NOT rely on DBI transactions for atomicity;
+ # only for batch performance.
+ $self->{mm}->{dbh}->rollback;
+ $self->{mm}->{dbh}->begin_work;
+ $sync->{mm_tmp} = $self->{mm}->tmp_clone;
+ }
+
# work backwards through history
for (my $i = $epoch_max; $i >= 0; $i--) {
index_epoch($self, $sync, $i);
$git->cleanup;
}
$self->done;
- if (my $pr = $sync->{-opt}->{-progress}) {
- $pr->('all.git '.sprintf($sync->{-regen_fmt}, $sync->{nr}));
+
+ if (my $nr = $sync->{nr}) {
+ my $pr = $sync->{-opt}->{-progress};
+ $pr->('all.git '.sprintf($sync->{-regen_fmt}, $nr)) if $pr;
}
# reindex does not pick up new changes, so we rerun w/o it: