X-Git-Url: http://www.git.stargrave.org/?a=blobdiff_plain;f=lib%2FPublicInbox%2FV2Writable.pm;h=2cc87305668256709f773415f750e5f52653503c;hb=017fed7bc4d33ac474a19356994be5bd0bfe68ba;hp=ea116f1a994be193fbb0acee42aad6dd5d94d71e;hpb=5e8be4aa1e6adb321e2b85f8f96ad1e2646c093a;p=public-inbox.git diff --git a/lib/PublicInbox/V2Writable.pm b/lib/PublicInbox/V2Writable.pm index ea116f1a..2cc87305 100644 --- a/lib/PublicInbox/V2Writable.pm +++ b/lib/PublicInbox/V2Writable.pm @@ -13,17 +13,21 @@ use PublicInbox::Import; use PublicInbox::MID qw(mids); use PublicInbox::ContentId qw(content_id content_digest); use PublicInbox::Inbox; -use PublicInbox::OverIdxFork; +use PublicInbox::OverIdx; use PublicInbox::Msgmap; -use PublicInbox::Spawn; +use PublicInbox::Spawn qw(spawn); +use PublicInbox::SearchIdx; use IO::Handle; # an estimate of the post-packed size to the raw uncompressed size my $PACKING_FACTOR = 0.4; # assume 2 cores if GNU nproc(1) is not available -sub nproc () { - int($ENV{NPROC} || `nproc 2>/dev/null` || 2); +sub nproc_parts () { + my $n = int($ENV{NPROC} || `nproc 2>/dev/null` || 2); + # subtract for the main process and git-fast-import + $n -= 1; + $n < 1 ? 1 : $n; } sub count_partitions ($) { @@ -67,13 +71,13 @@ sub new { parallel => 1, transact_bytes => 0, xpfx => $xpfx, - over => PublicInbox::OverIdxFork->new("$xpfx/over.sqlite3"), + over => PublicInbox::OverIdx->new("$xpfx/over.sqlite3", 1), lock_path => "$dir/inbox.lock", - # limit each repo to 1GB or so + # limit each git repo (epoch) to 1GB or so rotate_bytes => int((1024 * 1024 * 1024) / $PACKING_FACTOR), last_commit => [], # git repo -> commit }; - $self->{partitions} = count_partitions($self) || nproc(); + $self->{partitions} = count_partitions($self) || nproc_parts(); bless $self, $class; } @@ -81,9 +85,9 @@ sub init_inbox { my ($self, $parallel) = @_; $self->{parallel} = $parallel; $self->idx_init; - my $max_git = -1; - git_dir_latest($self, \$max_git); - $self->git_init($max_git >= 0 ? $max_git : 0); + my $epoch_max = -1; + git_dir_latest($self, \$epoch_max); + $self->git_init($epoch_max >= 0 ? 
$epoch_max : 0); $self->done; } @@ -111,11 +115,12 @@ sub add { my $im = $self->importer; my $cmt = $im->add($mime); $cmt = $im->get_mark($cmt); - my ($oid, $len, $msgref) = @{$im->{last_object}}; + $self->{last_commit}->[$self->{epoch_max}] = $cmt; + my ($oid, $len, $msgref) = @{$im->{last_object}}; + $self->{over}->add_overview($mime, $len, $num, $oid, $mid0); my $nparts = $self->{partitions}; my $part = $num % $nparts; - $self->{last_commit}->[$self->{max_git}] = $cmt; my $idx = $self->idx_part($part); $idx->index_raw($len, $msgref, $num, $oid, $mid0, $mime); my $n = $self->{transact_bytes} += $len; @@ -138,7 +143,6 @@ sub num_for { }; # crap, Message-ID is already known, hope somebody just resent: - $self->barrier; foreach my $m (@$mids) { # read-only lookup now safe to do after above barrier my $existing = $self->lookup_content($mime, $m); @@ -171,19 +175,19 @@ sub num_for_harder { my $hdr = $mime->header_obj; my $dig = content_digest($mime); - $$mid0 = PublicInbox::Import::digest2mid($dig); + $$mid0 = PublicInbox::Import::digest2mid($dig, $hdr); my $num = $self->{mm}->mid_insert($$mid0); unless (defined $num) { # it's hard to spoof the last Received: header my @recvd = $hdr->header_raw('Received'); $dig->add("Received: $_") foreach (@recvd); - $$mid0 = PublicInbox::Import::digest2mid($dig); + $$mid0 = PublicInbox::Import::digest2mid($dig, $hdr); $num = $self->{mm}->mid_insert($$mid0); # fall back to a random Message-ID and give up determinism: until (defined($num)) { $dig->add(rand); - $$mid0 = PublicInbox::Import::digest2mid($dig); + $$mid0 = PublicInbox::Import::digest2mid($dig, $hdr); warn "using random Message-ID <$$mid0> as fallback\n"; $num = $self->{mm}->mid_insert($$mid0); } @@ -208,11 +212,17 @@ sub idx_init { # frequently activated. 
delete $ibx->{$_} foreach (qw(git mm search)); + if ($self->{parallel}) { + pipe(my ($r, $w)) or die "pipe failed: $!"; + $self->{bnote} = [ $r, $w ]; + $w->autoflush(1); + } + my $over = $self->{over}; $ibx->umask_prepare; $ibx->with_umask(sub { $self->lock_acquire; - $over->create($self); + $over->create; # -compact can change partition count while -watch is idle my $nparts = count_partitions($self); @@ -242,7 +252,7 @@ sub purge_oids { $self->done; my $pfx = "$self->{-inbox}->{mainrepo}/git"; my $purges = []; - foreach my $i (0..$self->{max_git}) { + foreach my $i (0..$self->{epoch_max}) { my $git = PublicInbox::Git->new("$pfx/$i.git"); my $im = $self->import_init($git, 0, 1); $purges->[$i] = $im->purge_oids($purge); @@ -250,14 +260,32 @@ sub purge_oids { $purges; } +sub content_ids ($) { + my ($mime) = @_; + my @cids = ( content_id($mime) ); + + # Email::MIME->as_string doesn't always round-trip, so we may + # use a second content_id + my $rt = content_id(PublicInbox::MIME->new(\($mime->as_string))); + push @cids, $rt if $cids[0] ne $rt; + \@cids; +} + +sub content_matches ($$) { + my ($cids, $existing) = @_; + my $cid = content_id($existing); + foreach (@$cids) { + return 1 if $_ eq $cid + } + 0 +} + sub remove_internal { my ($self, $mime, $cmt_msg, $purge) = @_; - $self->barrier; $self->idx_init; my $im = $self->importer unless $purge; - my $ibx = $self->{-inbox}; - my $srch = $ibx->search; - my $cid = content_id($mime); + my $over = $self->{over}; + my $cids = content_ids($mime); my $parts = $self->{idx_parts}; my $mm = $self->{mm}; my $removed; @@ -271,22 +299,20 @@ sub remove_internal { foreach my $mid (@$mids) { my %gone; - $srch->reopen->each_smsg_by_mid($mid, sub { - my ($smsg) = @_; - $smsg->load_expand; - my $msg = $ibx->msg_by_smsg($smsg); + my ($id, $prev); + while (my $smsg = $over->next_by_mid($mid, \$id, \$prev)) { + my $msg = get_blob($self, $smsg); if (!defined($msg)) { warn "broken smsg for $mid\n"; - return 1; # continue + next; # continue } my $orig = $$msg; my $cur = PublicInbox::MIME->new($msg); - if (content_id($cur) eq $cid) { + if (content_matches($cids, $cur)) { $smsg->{mime} = $cur; - $gone{$smsg->num} = [ $smsg, \$orig ]; + $gone{$smsg->{num}} = [ $smsg, \$orig ]; } - 1; # continue - }); + } my $n = scalar keys %gone; next unless $n; if ($n > 1) { @@ -306,16 +332,13 @@ sub remove_internal { ($mark, undef) = $im->remove($orig, $cmt_msg); } $orig = undef; - foreach my $idx (@$parts) { - $idx->remote_remove($oid, $mid); - } + $self->unindex_oid_remote($oid, $mid); } - $self->barrier; } if (defined $mark) { my $cmt = $im->get_mark($mark); - $self->{last_commit}->[$self->{max_git}] = $cmt; + $self->{last_commit}->[$self->{epoch_max}] = $cmt; } if ($purge && scalar keys %$purge) { return purge_oids($self, $purge); @@ -347,76 +370,65 @@ sub last_commit_part ($$;$) { sub set_last_commits ($) { my ($self) = @_; - defined(my $max_git = $self->{max_git}) or return; + defined(my $epoch_max = $self->{epoch_max}) or return; my $last_commit = $self->{last_commit}; - foreach my $i (0..$max_git) { + foreach my $i (0..$epoch_max) { defined(my $cmt = $last_commit->[$i]) or next; $last_commit->[$i] = undef; last_commit_part($self, $i, $cmt); } } -sub done { - my ($self) = @_; - my $im = delete $self->{im}; - $im->done if $im; # PublicInbox::Import::done - - my $mm = $self->{mm}; - $mm->{dbh}->commit if $mm; - - # order matters, we can only close {over} after all partitions - # are done because the partitions also write to {over} - my $parts = delete $self->{idx_parts}; - if 
($parts) { - $_->remote_commit for @$parts; - $_->remote_close for @$parts; - } - - my $over = $self->{over}; - $over->remote_commit; - $over->remote_close; - - if ($mm) { - $mm->{dbh}->begin_work; - set_last_commits($self); - $mm->{dbh}->commit; - delete $self->{mm}; - } - - $self->{transact_bytes} = 0; - $self->lock_release if $parts; +sub barrier_init { + my ($self, $n) = @_; + $self->{bnote} or return; + --$n; + my $barrier = { map { $_ => 1 } (0..$n) }; } -sub checkpoint { - my ($self) = @_; - my $im = $self->{im}; - $im->checkpoint if $im; # PublicInbox::Import::checkpoint - $self->barrier(1); +sub barrier_wait { + my ($self, $barrier) = @_; + my $bnote = $self->{bnote} or return; + my $r = $bnote->[0]; + while (scalar keys %$barrier) { + defined(my $l = $r->getline) or die "EOF on barrier_wait: $!"; + $l =~ /\Abarrier (\d+)/ or die "bad line on barrier_wait: $l"; + delete $barrier->{$1} or die "bad part[$1] on barrier wait"; + } } -# issue a write barrier to ensure all data is visible to other processes -# and read-only ops. Order of data importance is: git > SQLite > Xapian -sub barrier { - my ($self, $fsync) = @_; +sub checkpoint ($;$) { + my ($self, $wait) = @_; if (my $im = $self->{im}) { - $im->barrier; + if ($wait) { + $im->barrier; + } else { + $im->checkpoint; + } } my $parts = $self->{idx_parts}; if ($parts) { my $dbh = $self->{mm}->{dbh}; - $dbh->commit; # SQLite msgmap data is second in importance - my $over = $self->{over}; + # SQLite msgmap data is second in importance + $dbh->commit; + + # SQLite overview is third + $self->{over}->commit_lazy; - # Now deal with Xapian and overview DB - $over->barrier_init(scalar(@$parts)); + # Now deal with Xapian + if ($wait) { + my $barrier = $self->barrier_init(scalar @$parts); - # each partition needs to issue a barrier command to over - $_->remote_barrier foreach @$parts; + # each partition needs to issue a barrier command + $_->remote_barrier for @$parts; - $over->barrier_wait; # wait for each Xapian partition - $over->commit_fsync if $fsync; + # wait for each Xapian partition + $self->barrier_wait($barrier); + } else { + $_->remote_commit for @$parts; + } # last_commit is special, don't commit these until # remote partitions are done: @@ -429,10 +441,31 @@ sub barrier { $self->{transact_bytes} = 0; } +# issue a write barrier to ensure all data is visible to other processes +# and read-only ops. 
Order of data importance is: git > SQLite > Xapian +sub barrier { checkpoint($_[0], 1) }; + +sub done { + my ($self) = @_; + my $im = delete $self->{im}; + $im->done if $im; # PublicInbox::Import::done + checkpoint($self); + my $mm = delete $self->{mm}; + $mm->{dbh}->commit if $mm; + my $parts = delete $self->{idx_parts}; + if ($parts) { + $_->remote_close for @$parts; + } + $self->{over}->disconnect; + delete $self->{bnote}; + $self->{transact_bytes} = 0; + $self->lock_release if $parts; +} + sub git_init { - my ($self, $new) = @_; + my ($self, $epoch) = @_; my $pfx = "$self->{-inbox}->{mainrepo}/git"; - my $git_dir = "$pfx/$new.git"; + my $git_dir = "$pfx/$epoch.git"; my @cmd = (qw(git init --bare -q), $git_dir); PublicInbox::Import::run_die(\@cmd); @@ -450,7 +483,7 @@ sub git_init { PublicInbox::Import::run_die(\@cmd); my $alt = "$all/objects/info/alternates"; - my $new_obj_dir = "../../git/$new.git/objects"; + my $new_obj_dir = "../../git/$epoch.git/objects"; my %alts; if (-e $alt) { open(my $fh, '<', $alt) or die "open < $alt: $!\n"; @@ -489,28 +522,28 @@ sub importer { } else { $self->{im} = undef; $im->done; - $self->barrier(1); $im = undef; - my $git_dir = $self->git_init(++$self->{max_git}); + $self->checkpoint; + my $git_dir = $self->git_init(++$self->{epoch_max}); my $git = PublicInbox::Git->new($git_dir); return $self->import_init($git, 0); } } - my $new = 0; + my $epoch = 0; my $max; my $latest = git_dir_latest($self, \$max); if (defined $latest) { my $git = PublicInbox::Git->new($latest); my $packed_bytes = $git->packed_bytes; if ($packed_bytes >= $self->{rotate_bytes}) { - $new = $max + 1; + $epoch = $max + 1; } else { - $self->{max_git} = $max; + $self->{epoch_max} = $max; return $self->import_init($git, $packed_bytes); } } - $self->{max_git} = $new; - $latest = $self->git_init($new); + $self->{epoch_max} = $epoch; + $latest = $self->git_init($epoch); $self->import_init(PublicInbox::Git->new($latest), 0); } @@ -529,7 +562,6 @@ sub import_init { sub diff ($$$) { my ($mid, $cur, $new) = @_; use File::Temp qw(tempfile); - use PublicInbox::Spawn qw(spawn); my ($ah, $an) = tempfile('email-cur-XXXXXXXX', TMPDIR => 1); print $ah $cur->as_string or die "print: $!"; @@ -546,34 +578,39 @@ sub diff ($$$) { unlink($an, $bn); } -sub lookup_content { - my ($self, $mime, $mid) = @_; +sub get_blob ($$) { + my ($self, $smsg) = @_; + if (my $im = $self->{im}) { + my $msg = $im->cat_blob($smsg->{blob}); + return $msg if $msg; + } + # older message, should be in alternates my $ibx = $self->{-inbox}; + $ibx->msg_by_smsg($smsg); +} - my $srch = $ibx->search->reopen; - my $cid = content_id($mime); - my $found; - $srch->each_smsg_by_mid($mid, sub { - my ($smsg) = @_; - $smsg->load_expand; - my $msg = $ibx->msg_by_smsg($smsg); +sub lookup_content { + my ($self, $mime, $mid) = @_; + my $over = $self->{over}; + my $cids = content_ids($mime); + my ($id, $prev); + while (my $smsg = $over->next_by_mid($mid, \$id, \$prev)) { + my $msg = get_blob($self, $smsg); if (!defined($msg)) { warn "broken smsg for $mid\n"; - return 1; # continue + next; } my $cur = PublicInbox::MIME->new($msg); - if (content_id($cur) eq $cid) { + if (content_matches($cids, $cur)) { $smsg->{mime} = $cur; - $found = $smsg; - return 0; # break out of loop + return $smsg; } + # XXX DEBUG_DIFF is experimental and may be removed diff($mid, $cur, $mime) if $ENV{DEBUG_DIFF}; - - 1; # continue - }); - $found; + } + undef; } sub atfork_child { @@ -587,6 +624,8 @@ sub atfork_child { $im->atfork_child; } die "unexpected mm" if $self->{mm}; + 
close $self->{bnote}->[0] or die "close bnote[0]: $!\n"; + $self->{bnote}->[1]; } sub mark_deleted { @@ -660,6 +699,7 @@ sub reindex_oid { $mm_tmp->mid_delete($mid0) or die "failed to delete <$mid0> for article #$num\n"; + $self->{over}->add_overview($mime, $len, $num, $oid, $mid0); my $nparts = $self->{partitions}; my $part = $num % $nparts; my $idx = $self->idx_part($part); @@ -690,30 +730,21 @@ sub update_last_commit { sub git_dir_n ($$) { "$_[0]->{-inbox}->{mainrepo}/git/$_[1].git" } sub last_commits { - my ($self, $max_git) = @_; + my ($self, $epoch_max) = @_; my $heads = []; - for (my $i = $max_git; $i >= 0; $i--) { + for (my $i = $epoch_max; $i >= 0; $i--) { $heads->[$i] = last_commit_part($self, $i); } $heads; } -sub is_ancestor ($$$) { - my ($git, $cur, $tip) = @_; - return 0 unless $git->check($cur); - my $cmd = [ 'git', "--git-dir=$git->{git_dir}", - qw(merge-base --is-ancestor), $cur, $tip ]; - my $pid = spawn($cmd); - defined $pid or die "spawning ".join(' ', @$cmd)." failed: $!"; - waitpid($pid, 0) == $pid or die join(' ', @$cmd) .' did not finish'; - $? == 0; -} +*is_ancestor = *PublicInbox::SearchIdx::is_ancestor; sub index_prepare { - my ($self, $opts, $max_git, $ranges) = @_; + my ($self, $opts, $epoch_max, $ranges) = @_; my $regen_max = 0; my $head = $self->{-inbox}->{ref_head} || 'refs/heads/master'; - for (my $i = $max_git; $i >= 0; $i--) { + for (my $i = $epoch_max; $i >= 0; $i--) { die "already indexing!\n" if $self->{index_pipe}; my $git_dir = git_dir_n($self, $i); -d $git_dir or next; # missing parts are fine @@ -765,21 +796,26 @@ $range \$regen_max; } +sub unindex_oid_remote { + my ($self, $oid, $mid) = @_; + $_->remote_remove($oid, $mid) foreach @{$self->{idx_parts}}; + $self->{over}->remove_oid($oid, $mid); +} + sub unindex_oid { my ($self, $git, $oid) = @_; my $msgref = $git->cat_file($oid); my $mime = PublicInbox::MIME->new($msgref); my $mids = mids($mime->header_obj); $mime = $msgref = undef; - + my $over = $self->{over}; foreach my $mid (@$mids) { my %gone; - $self->{-inbox}->search->reopen->each_smsg_by_mid($mid, sub { - my ($smsg) = @_; - $smsg->load_expand; - $gone{$smsg->num} = 1 if $oid eq $smsg->{blob}; + my ($id, $prev); + while (my $smsg = $over->next_by_mid($mid, \$id, \$prev)) { + $gone{$smsg->{num}} = 1 if $oid eq $smsg->{blob}; 1; # continue - }); + } my $n = scalar keys %gone; next unless $n; if ($n > 1) { @@ -787,9 +823,7 @@ sub unindex_oid { join(',',sort keys %gone), "\n"; } $self->{unindexed}->{$_}++ foreach keys %gone; - $_->remote_remove($oid, $mid) foreach @{$self->{idx_parts}}; - $self->{over}->remove_oid($oid, $mid); - $self->barrier; + $self->unindex_oid_remote($oid, $mid); } } @@ -797,7 +831,6 @@ my $x40 = qr/[a-f0-9]{40}/; sub unindex { my ($self, $opts, $git, $unindex_range) = @_; my $un = $self->{unindexed} ||= {}; # num => removal count - $self->barrier; my $before = scalar keys %$un; my @cmd = qw(log --raw -r --no-notes --no-color --no-abbrev --no-renames); @@ -821,16 +854,15 @@ sub unindex { sub index_sync { my ($self, $opts) = @_; $opts ||= {}; - my $ibx = $self->{-inbox}; - my $max_git; - my $latest = git_dir_latest($self, \$max_git); + my $epoch_max; + my $latest = git_dir_latest($self, \$epoch_max); return unless defined $latest; $self->idx_init; # acquire lock my $mm_tmp = $self->{mm}->tmp_clone; - my $ranges = $opts->{reindex} ? [] : $self->last_commits($max_git); + my $ranges = $opts->{reindex} ? 
[] : $self->last_commits($epoch_max); my ($min, $max) = $mm_tmp->minmax; - my $regen = $self->index_prepare($opts, $max_git, $ranges); + my $regen = $self->index_prepare($opts, $epoch_max, $ranges); $$regen += $max if $max; my $D = {}; my @cmd = qw(log --raw -r --pretty=tformat:%h @@ -838,7 +870,7 @@ sub index_sync { # work backwards through history my $last_commit = []; - for (my $i = $max_git; $i >= 0; $i--) { + for (my $i = $epoch_max; $i >= 0; $i--) { my $git_dir = git_dir_n($self, $i); die "already reindexing!\n" if delete $self->{reindex_pipe}; -d $git_dir or next; # missing parts are fine
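
Note on the new barrier code above: the patch drops the OverIdxFork RPC and synchronizes the partition workers over a plain pipe ({bnote}) instead. The parent creates the pipe in idx_init, atfork_child hands the write end to each partition process, each partition prints "barrier $part\n" once its flush is done, and barrier_wait reads from the pipe until every partition has checked in. The following is a minimal, self-contained sketch of that protocol only; it is not V2Writable code: the worker body is a placeholder, the partition count is made up, and the parent closes its copy of the write end for simplicity (the real code keeps it open in {bnote} so the barrier can be reused).

#!/usr/bin/env perl
# Hypothetical sketch of the pipe-based barrier used by barrier_init /
# barrier_wait / atfork_child above.  Worker bodies are stand-ins; real
# partitions commit their Xapian state before reporting back.
use strict;
use warnings;
use IO::Handle;

my $nparts = 3;
pipe(my ($r, $w)) or die "pipe failed: $!";
$w->autoflush(1);

for my $part (0 .. $nparts - 1) {
	my $pid = fork;
	defined $pid or die "fork: $!";
	if ($pid == 0) { # child: stand-in for one index partition
		close $r or die "close bnote[0]: $!";
		# ... partition would flush its index here ...
		print $w "barrier $part\n" or die "write: $!";
		exit 0;
	}
}
# parent drops its write end so the read loop sees EOF if a child dies
# early; the real code keeps it open in {bnote} for later barriers
close $w or die "close: $!";

# equivalent of barrier_init followed by barrier_wait
my %barrier = map { $_ => 1 } (0 .. $nparts - 1);
while (keys %barrier) {
	defined(my $l = <$r>) or die "EOF on barrier_wait: $!";
	$l =~ /\Abarrier (\d+)/ or die "bad line on barrier_wait: $l";
	delete $barrier{$1} or die "bad part[$1] on barrier wait";
}
1 while waitpid(-1, 0) > 0; # reap the workers
print "all $nparts partitions reached the barrier\n";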