X-Git-Url: http://www.git.stargrave.org/?a=blobdiff_plain;f=lib%2FPublicInbox%2FV2Writable.pm;h=9d5078285ac9634a115685a96e03b5f157078ee9;hb=e4be18b4f9b39aa58bebdbe00f0f7c8a65f1f82d;hp=aa13aa8fb4722d7215422182bbcc024b5efacacf;hpb=240de56c97d767cd5c819ac0be858359e8d2eff3;p=public-inbox.git

diff --git a/lib/PublicInbox/V2Writable.pm b/lib/PublicInbox/V2Writable.pm
index aa13aa8f..9d507828 100644
--- a/lib/PublicInbox/V2Writable.pm
+++ b/lib/PublicInbox/V2Writable.pm
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 all contributors
+# Copyright (C) 2018-2019 all contributors
 # License: AGPL-3.0+
 
 # This interface wraps and mimics PublicInbox::Import
@@ -7,7 +7,7 @@ package PublicInbox::V2Writable;
 use strict;
 use warnings;
 use base qw(PublicInbox::Lock);
-use PublicInbox::SearchIdxPart;
+use PublicInbox::SearchIdxShard;
 use PublicInbox::MIME;
 use PublicInbox::Git;
 use PublicInbox::Import;
@@ -24,14 +24,14 @@ use IO::Handle;
 my $PACKING_FACTOR = 0.4;
 
 # SATA storage lags behind what CPUs are capable of, so relying on
-# nproc(1) can be misleading and having extra Xapian partions is a
+# nproc(1) can be misleading and having extra Xapian shards is a
 # waste of FDs and space. It can also lead to excessive IO latency
 # and slow things down. Users on NVME or other fast storage can
 # use the NPROC env or switches in our script/public-inbox-* programs
-# to increase Xapian partitions.
+# to increase Xapian shards
 our $NPROC_MAX_DEFAULT = 4;
 
-sub nproc_parts ($) {
+sub nproc_shards ($) {
 	my ($creat_opt) = @_;
 	if (ref($creat_opt) eq 'HASH') {
 		if (defined(my $n = $creat_opt->{nproc})) {
@@ -54,29 +54,29 @@ sub nproc_parts ($) {
 
 sub count_shards ($) {
 	my ($self) = @_;
-	my $nparts = 0;
+	my $n = 0;
 	my $xpfx = $self->{xpfx};
 
-	# always load existing partitions in case core count changes:
+	# always load existing shards in case core count changes:
 	# Also, shard count may change while -watch is running
 	# due to "xcpdb --reshard"
 	if (-d $xpfx) {
-		foreach my $part (<$xpfx/*>) {
-			-d $part && $part =~ m!/[0-9]+\z! or next;
+		foreach my $shard (<$xpfx/*>) {
+			-d $shard && $shard =~ m!/[0-9]+\z! or next;
 			eval {
-				Search::Xapian::Database->new($part)->close;
-				$nparts++;
+				Search::Xapian::Database->new($shard)->close;
+				$n++;
 			};
 		}
 	}
-	$nparts;
+	$n;
 }
 
 sub new {
 	# $creat may be any true value, or 0/undef. A hashref is true,
 	# and $creat->{nproc} may be set to an integer
 	my ($class, $v2ibx, $creat) = @_;
-	my $dir = $v2ibx->{mainrepo} or die "no mainrepo in inbox\n";
+	my $dir = $v2ibx->{inboxdir} or die "no inboxdir in inbox\n";
 	unless (-d $dir) {
 		if ($creat) {
 			require File::Path;
@@ -103,7 +103,7 @@ sub new {
 		rotate_bytes => int((1024 * 1024 * 1024) / $PACKING_FACTOR),
 		last_commit => [], # git repo -> commit
 	};
-	$self->{shards} = count_shards($self) || nproc_parts($creat);
+	$self->{shards} = count_shards($self) || nproc_shards($creat);
 	bless $self, $class;
 }
 
@@ -134,12 +134,10 @@ sub add {
 sub do_idx ($$$$$$$) {
 	my ($self, $msgref, $mime, $len, $num, $oid, $mid0) = @_;
 	$self->{over}->add_overview($mime, $len, $num, $oid, $mid0);
-	my $npart = $self->{shards};
-	my $part = $num % $npart;
-	my $idx = idx_part($self, $part);
+	my $idx = idx_shard($self, $num % $self->{shards});
 	$idx->index_raw($len, $msgref, $num, $oid, $mid0, $mime);
 	my $n = $self->{transact_bytes} += $len;
-	$n >= (PublicInbox::SearchIdx::BATCH_BYTES * $npart);
+	$n >= (PublicInbox::SearchIdx::BATCH_BYTES * $self->{shards});
 }
 
 sub _add {
@@ -157,8 +155,7 @@
 	# leaking FDs to it...
 	$self->idx_init;
 
-	my $mid0;
-	my $num = num_for($self, $mime, \$mid0);
+	my ($num, $mid0) = v2_num_for($self, $mime);
 	defined $num or return; # duplicate
 	defined $mid0 or die "BUG: $mid0 undefined\n";
 	my $im = $self->importer;
@@ -174,16 +171,15 @@
 	$cmt;
 }
 
-sub num_for {
-	my ($self, $mime, $mid0) = @_;
+sub v2_num_for {
+	my ($self, $mime) = @_;
 	my $mids = mids($mime->header_obj);
 	if (@$mids) {
 		my $mid = $mids->[0];
 		my $num = $self->{mm}->mid_insert($mid);
 		if (defined $num) { # common case
-			$$mid0 = $mid;
-			return $num;
-		};
+			return ($num, $mid);
+		}
 
 		# crap, Message-ID is already known, hope somebody just resent:
 		foreach my $m (@$mids) {
@@ -192,7 +188,7 @@
 			# easy, don't store duplicates
 			# note: do not add more diagnostic info here since
 			# it gets noisy on public-inbox-watch restarts
-			return if $existing;
+			return () if $existing;
 		}
 
 		# AltId may pre-populate article numbers (e.g. X-Mail-Count
@@ -203,8 +199,7 @@
 			my $num = $self->{mm}->num_for($mid);
 
 			if (defined $num && !$self->{over}->get_art($num)) {
-				$$mid0 = $mid;
-				return $num;
+				return ($num, $mid);
 			}
 		}
 
@@ -217,50 +212,49 @@
 			$num = $self->{mm}->mid_insert($m);
 			if (defined $num) {
 				warn "alternative <$m> for <$mid> found\n";
-				$$mid0 = $m;
-				return $num;
+				return ($num, $m);
 			}
 		}
 	}
 
 	# none of the existing Message-IDs are good, generate a new one:
-	num_for_harder($self, $mime, $mid0);
+	v2_num_for_harder($self, $mime);
 }
 
-sub num_for_harder {
-	my ($self, $mime, $mid0) = @_;
+sub v2_num_for_harder {
+	my ($self, $mime) = @_;
 	my $hdr = $mime->header_obj;
 	my $dig = content_digest($mime);
-	$$mid0 = PublicInbox::Import::digest2mid($dig, $hdr);
-	my $num = $self->{mm}->mid_insert($$mid0);
+	my $mid0 = PublicInbox::Import::digest2mid($dig, $hdr);
+	my $num = $self->{mm}->mid_insert($mid0);
 	unless (defined $num) {
 		# it's hard to spoof the last Received: header
 		my @recvd = $hdr->header_raw('Received');
 		$dig->add("Received: $_") foreach (@recvd);
-		$$mid0 = PublicInbox::Import::digest2mid($dig, $hdr);
-		$num = $self->{mm}->mid_insert($$mid0);
+		$mid0 = PublicInbox::Import::digest2mid($dig, $hdr);
+		$num = $self->{mm}->mid_insert($mid0);
 
 		# fall back to a random Message-ID and give up determinism:
 		until (defined($num)) {
 			$dig->add(rand);
-			$$mid0 = PublicInbox::Import::digest2mid($dig, $hdr);
-			warn "using random Message-ID <$$mid0> as fallback\n";
-			$num = $self->{mm}->mid_insert($$mid0);
+			$mid0 = PublicInbox::Import::digest2mid($dig, $hdr);
+			warn "using random Message-ID <$mid0> as fallback\n";
+			$num = $self->{mm}->mid_insert($mid0);
 		}
 	}
-	PublicInbox::Import::append_mid($hdr, $$mid0);
-	$num;
+	PublicInbox::Import::append_mid($hdr, $mid0);
+	($num, $mid0);
 }
 
-sub idx_part {
-	my ($self, $part) = @_;
-	$self->{idx_parts}->[$part];
+sub idx_shard {
+	my ($self, $shard_i) = @_;
	$self->{idx_shards}->[$shard_i];
 }
 
 # idempotent
 sub idx_init {
 	my ($self, $opt) = @_;
-	return if $self->{idx_parts};
+	return if $self->{idx_shards};
 	my $ibx = $self->{-inbox};
 
 	# do not leak read-only FDs to child processes, we only have these
@@ -289,24 +283,24 @@ sub idx_init {
 		$over->create;
 
 		# xcpdb can change shard count while -watch is idle
-		my $nparts = count_shards($self);
-		if ($nparts && $nparts != $self->{shards}) {
-			$self->{shards} = $nparts;
+		my $nshards = count_shards($self);
+		if ($nshards && $nshards != $self->{shards}) {
+			$self->{shards} = $nshards;
 		}
 
-		# need to create all parts before initializing msgmap FD
+		# need to create all shards before initializing msgmap FD
 		my $max = $self->{shards} - 1;
 
-		# idx_parts must be visible to all forked processes
-		my $idx = $self->{idx_parts} = [];
+		# idx_shards must be visible to all forked processes
+		my $idx = $self->{idx_shards} = [];
 		for my $i (0..$max) {
-			push @$idx, PublicInbox::SearchIdxPart->new($self, $i);
+			push @$idx, PublicInbox::SearchIdxShard->new($self, $i);
 		}
 
 		# Now that all subprocesses are up, we can open the FDs
 		# for SQLite:
 		my $mm = $self->{mm} = PublicInbox::Msgmap->new_file(
-			"$self->{-inbox}->{mainrepo}/msgmap.sqlite3", 1);
+			"$self->{-inbox}->{inboxdir}/msgmap.sqlite3", 1);
 		$mm->{dbh}->begin_work;
 	});
 }
@@ -317,7 +311,7 @@ sub idx_init {
 sub _replace_oids ($$$) {
 	my ($self, $mime, $replace_map) = @_;
 	$self->done;
-	my $pfx = "$self->{-inbox}->{mainrepo}/git";
+	my $pfx = "$self->{-inbox}->{inboxdir}/git";
 	my $rewrites = []; # epoch => commit
 	my $max = $self->{epoch_max};
 
@@ -370,7 +364,6 @@ sub rewrite_internal ($$;$$$) {
 	}
 	my $over = $self->{over};
 	my $cids = content_ids($old_mime);
-	my $parts = $self->{idx_parts};
 	my $removed;
 	my $mids = mids($old_mime->header_obj);
 
@@ -559,7 +552,7 @@ W: $list
 	$rewritten->{rewrites};
 }
 
-sub last_commit_part ($$;$) {
+sub last_epoch_commit ($$;$) {
 	my ($self, $i, $cmt) = @_;
 	my $v = PublicInbox::Search::SCHEMA_VERSION();
 	$self->{mm}->last_commit_xap($v, $i, $cmt);
@@ -572,7 +565,7 @@ sub set_last_commits ($) {
 	foreach my $i (0..$epoch_max) {
 		defined(my $cmt = $last_commit->[$i]) or next;
 		$last_commit->[$i] = undef;
-		last_commit_part($self, $i, $cmt);
+		last_epoch_commit($self, $i, $cmt);
 	}
 }
 
@@ -590,7 +583,7 @@ sub barrier_wait {
 	while (scalar keys %$barrier) {
 		defined(my $l = $r->getline) or die "EOF on barrier_wait: $!";
 		$l =~ /\Abarrier (\d+)/ or die "bad line on barrier_wait: $l";
-		delete $barrier->{$1} or die "bad part[$1] on barrier wait";
+		delete $barrier->{$1} or die "bad shard[$1] on barrier wait";
 	}
 }
 
@@ -605,8 +598,8 @@ sub checkpoint ($;$) {
 			$im->checkpoint;
 		}
 	}
-	my $parts = $self->{idx_parts};
-	if ($parts) {
+	my $shards = $self->{idx_shards};
+	if ($shards) {
 		my $dbh = $self->{mm}->{dbh};
 
 		# SQLite msgmap data is second in importance
@@ -617,19 +610,19 @@
 
 		# Now deal with Xapian
 		if ($wait) {
-			my $barrier = $self->barrier_init(scalar @$parts);
+			my $barrier = $self->barrier_init(scalar @$shards);
 
-			# each partition needs to issue a barrier command
-			$_->remote_barrier for @$parts;
+			# each shard needs to issue a barrier command
+			$_->remote_barrier for @$shards;
 
-			# wait for each Xapian partition
+			# wait for each Xapian shard
 			$self->barrier_wait($barrier);
 		} else {
-			$_->remote_commit for @$parts;
+			$_->remote_commit for @$shards;
 		}
 
 		# last_commit is special, don't commit these until
-		# remote partitions are done:
+		# remote shards are done:
 		$dbh->begin_work;
 		set_last_commits($self);
 		$dbh->commit;
@@ -652,30 +645,26 @@ sub done {
 	checkpoint($self);
 	my $mm = delete $self->{mm};
 	$mm->{dbh}->commit if $mm;
-	my $parts = delete $self->{idx_parts};
-	if ($parts) {
-		$_->remote_close for @$parts;
+	my $shards = delete $self->{idx_shards};
+	if ($shards) {
+		$_->remote_close for @$shards;
 	}
 	$self->{over}->disconnect;
 	delete $self->{bnote};
 	$self->{transact_bytes} = 0;
-	$self->lock_release if $parts;
+	$self->lock_release if $shards;
 	$self->{-inbox}->git->cleanup;
 }
 
 sub fill_alternates ($$) {
 	my ($self, $epoch) = @_;
-	my $pfx = "$self->{-inbox}->{mainrepo}/git";
-	my $all = "$self->{-inbox}->{mainrepo}/all.git";
-	my @cmd;
+	my $pfx = "$self->{-inbox}->{inboxdir}/git";
+	my $all = "$self->{-inbox}->{inboxdir}/all.git";
+
 	unless (-d $all) {
 		PublicInbox::Import::init_bare($all);
 	}
 
-	@cmd = (qw/git config/, "--file=$pfx/$epoch.git/config",
-		'include.path', '../../all.git/config');
-	PublicInbox::Import::run_die(\@cmd);
-
 	my $alt = "$all/objects/info/alternates";
 	my %alts;
 	my @add;
@@ -697,9 +686,12 @@
 
 sub git_init {
 	my ($self, $epoch) = @_;
-	my $git_dir = "$self->{-inbox}->{mainrepo}/git/$epoch.git";
+	my $git_dir = "$self->{-inbox}->{inboxdir}/git/$epoch.git";
 	my @cmd = (qw(git init --bare -q), $git_dir);
 	PublicInbox::Import::run_die(\@cmd);
+	@cmd = (qw/git config/, "--file=$git_dir/config",
+		'include.path', '../../all.git/config');
+	PublicInbox::Import::run_die(\@cmd);
 	fill_alternates($self, $epoch);
 	$git_dir
 }
@@ -707,7 +699,7 @@
 sub git_dir_latest {
 	my ($self, $max) = @_;
 	$$max = -1;
-	my $pfx = "$self->{-inbox}->{mainrepo}/git";
+	my $pfx = "$self->{-inbox}->{inboxdir}/git";
 	return unless -d $pfx;
 	my $latest;
 	opendir my $dh, $pfx or die "opendir $pfx: $!\n";
@@ -827,8 +819,8 @@ sub atfork_child {
 	my ($self) = @_;
 	my $fh = delete $self->{reindex_pipe};
 	close $fh if $fh;
-	if (my $parts = $self->{idx_parts}) {
-		$_->atfork_child foreach @$parts;
+	if (my $shards = $self->{idx_shards}) {
+		$_->atfork_child foreach @$shards;
 	}
 	if (my $im = $self->{im}) {
 		$im->atfork_child;
@@ -930,22 +922,22 @@ sub reindex_oid ($$$$) {
 # only update last_commit for $i on reindex iff newer than current
 sub update_last_commit ($$$$) {
 	my ($self, $git, $i, $cmt) = @_;
-	my $last = last_commit_part($self, $i);
+	my $last = last_epoch_commit($self, $i);
 	if (defined $last && is_ancestor($git, $last, $cmt)) {
 		my @cmd = (qw(rev-list --count), "$last..$cmt");
 		chomp(my $n = $git->qx(@cmd));
 		return if $n ne '' && $n == 0;
 	}
-	last_commit_part($self, $i, $cmt);
+	last_epoch_commit($self, $i, $cmt);
 }
 
-sub git_dir_n ($$) { "$_[0]->{-inbox}->{mainrepo}/git/$_[1].git" }
+sub git_dir_n ($$) { "$_[0]->{-inbox}->{inboxdir}/git/$_[1].git" }
 
 sub last_commits ($$) {
 	my ($self, $epoch_max) = @_;
 	my $heads = [];
 	for (my $i = $epoch_max; $i >= 0; $i--) {
-		$heads->[$i] = last_commit_part($self, $i);
+		$heads->[$i] = last_epoch_commit($self, $i);
 	}
 	$heads;
 }
@@ -1016,7 +1008,7 @@ sub sync_prepare ($$$) {
 	for (my $i = $epoch_max; $i >= 0; $i--) {
 		die 'BUG: already indexing!' if $self->{reindex_pipe};
 		my $git_dir = git_dir_n($self, $i);
-		-d $git_dir or next; # missing parts are fine
+		-d $git_dir or next; # missing epochs are fine
 		my $git = PublicInbox::Git->new($git_dir);
 		if ($reindex_heads) {
 			$head = $reindex_heads->[$i] or next;
@@ -1051,7 +1043,7 @@
 
 sub unindex_oid_remote ($$$) {
 	my ($self, $oid, $mid) = @_;
-	$_->remote_remove($oid, $mid) foreach @{$self->{idx_parts}};
+	$_->remote_remove($oid, $mid) foreach @{$self->{idx_shards}};
 	$self->{over}->remove_oid($oid, $mid);
 }
 
@@ -1076,7 +1068,7 @@ sub unindex_oid ($$$) {
 			join(',',sort keys %gone), "\n";
 	}
 	foreach my $num (keys %gone) {
-		$self->{unindexed}->{$_}++;
+		$self->{unindexed}->{$num}++;
 		$self->{mm}->num_delete($num);
 	}
 	unindex_oid_remote($self, $oid, $mid);
@@ -1126,7 +1118,7 @@ sub index_epoch ($$$) {
 
 	my $git_dir = git_dir_n($self, $i);
 	die 'BUG: already reindexing!' if $self->{reindex_pipe};
-	-d $git_dir or return; # missing parts are fine
+	-d $git_dir or return; # missing epochs are fine
 	fill_alternates($self, $i);
 	my $git = PublicInbox::Git->new($git_dir);
 	if (my $unindex_range = delete $sync->{unindex_range}->{$i}) {