1 # Copyright (C) 2018 all contributors <meta@public-inbox.org>
2 # License: AGPL-3.0+ <https://www.gnu.org/licenses/agpl-3.0.txt>
4 # This interface wraps and mimics PublicInbox::Import
5 # Used to write to V2 inboxes (see L<public-inbox-v2-format(5)>).
6 package PublicInbox::V2Writable;
9 use base qw(PublicInbox::Lock);
10 use PublicInbox::SearchIdxShard;
11 use PublicInbox::MIME;
13 use PublicInbox::Import;
14 use PublicInbox::MID qw(mids references);
15 use PublicInbox::ContentId qw(content_id content_digest);
16 use PublicInbox::Inbox;
17 use PublicInbox::OverIdx;
18 use PublicInbox::Msgmap;
19 use PublicInbox::Spawn qw(spawn);
20 use PublicInbox::SearchIdx;
23 # an estimate of the ratio of post-packed size to raw uncompressed size
24 my $PACKING_FACTOR = 0.4;
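# (with the 0.4 estimate above, the "1GB or so" per-epoch limit below is
# enforced on unpacked input: rotate_bytes = 1GiB / 0.4, i.e. roughly 2.5GiB
# of raw mail is expected to pack down to about 1GiB on disk)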
26 # SATA storage lags behind what CPUs are capable of, so relying on
27 # nproc(1) can be misleading and having extra Xapian shards is a
28 # waste of FDs and space. It can also lead to excessive IO latency
29 # and slow things down. Users on NVMe or other fast storage can
30 # use the NPROC env var or switches in our script/public-inbox-* programs
31 # to increase the number of Xapian shards
32 our $NPROC_MAX_DEFAULT = 4;
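# illustrative override on fast storage, assuming the NPROC environment
# variable is honored as described above:
#   NPROC=8 public-inbox-index /path/to/inbox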
34 sub nproc_shards ($) {
36 if (ref($creat_opt) eq 'HASH') {
37 if (defined(my $n = $creat_opt->{nproc})) {
44 chomp($n = `nproc 2>/dev/null`);
45 # assume 2 cores if GNU nproc(1) is not available
47 $n = $NPROC_MAX_DEFAULT if $n > $NPROC_MAX_DEFAULT;
50 # subtract for the main process and git-fast-import
55 sub count_shards ($) {
58 my $xpfx = $self->{xpfx};
60 # always load existing shards in case core count changes:
61 # Also, shard count may change while -watch is running
62 # due to "xcpdb --reshard"
64 foreach my $shard (<$xpfx/*>) {
65 -d $shard && $shard =~ m!/[0-9]+\z! or next;
67 Search::Xapian::Database->new($shard)->close;
76 # $creat may be any true value, or 0/undef. A hashref is true,
77 # and $creat->{nproc} may be set to an integer
78 my ($class, $v2ibx, $creat) = @_;
79 my $dir = $v2ibx->{mainrepo} or die "no mainrepo in inbox\n";
83 File::Path::mkpath($dir);
85 die "$dir does not exist\n";
89 $v2ibx = PublicInbox::InboxWritable->new($v2ibx);
90 $v2ibx->umask_prepare;
92 my $xpfx = "$dir/xap" . PublicInbox::Search::SCHEMA_VERSION;
95 im => undef, # PublicInbox::Import
100 over => PublicInbox::OverIdx->new("$xpfx/over.sqlite3", 1),
101 lock_path => "$dir/inbox.lock",
102 # limit each git repo (epoch) to 1GB or so
103 rotate_bytes => int((1024 * 1024 * 1024) / $PACKING_FACTOR),
104 last_commit => [], # git repo -> commit
106 $self->{shards} = count_shards($self) || nproc_shards($creat);
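# an existing on-disk shard count (count_shards) takes precedence over the
# nproc heuristic, so an established inbox keeps its shard layout until
# "xcpdb --reshard" changes it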
112 my ($self, $parallel, $skip_epoch) = @_;
113 $self->{parallel} = $parallel;
116 git_dir_latest($self, \$epoch_max);
117 if (defined $skip_epoch && $epoch_max == -1) {
118 $epoch_max = $skip_epoch;
120 $self->git_init($epoch_max >= 0 ? $epoch_max : 0);
124 # returns undef on duplicate or spam
125 # mimics Import::add and wraps it for v2
127 my ($self, $mime, $check_cb) = @_;
128 $self->{-inbox}->with_umask(sub {
129 _add($self, $mime, $check_cb)
133 # indexes a message, returns true if checkpointing is needed
134 sub do_idx ($$$$$$$) {
135 my ($self, $msgref, $mime, $len, $num, $oid, $mid0) = @_;
136 $self->{over}->add_overview($mime, $len, $num, $oid, $mid0);
137 my $idx = idx_shard($self, $num % $self->{shards});
138 $idx->index_raw($len, $msgref, $num, $oid, $mid0, $mime);
139 my $n = $self->{transact_bytes} += $len;
140 $n >= (PublicInbox::SearchIdx::BATCH_BYTES * $self->{shards});
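# in other words: once roughly BATCH_BYTES of raw message data has been
# staged per shard since the last checkpoint, the caller is expected to
# checkpoint (commit SQLite, barrier the Xapian shards) before continuing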
144 my ($self, $mime, $check_cb) = @_;
148 $mime = $check_cb->($mime) or return;
151 # All pipes (> $^F) known to Perl 5.6+ have FD_CLOEXEC set,
152 # as does SQLite 3.4.1+ (released 2007-07-20), and
153 # Xapian 1.3.2+ (released 2015-03-15).
154 # For the most part, we can spawn git-fast-import without
155 # leaking FDs to it...
159 my $num = num_for($self, $mime, \$mid0);
160 defined $num or return; # duplicate
161 defined $mid0 or die "BUG: \$mid0 undefined\n";
162 my $im = $self->importer;
163 my $cmt = $im->add($mime);
164 $cmt = $im->get_mark($cmt);
165 $self->{last_commit}->[$self->{epoch_max}] = $cmt;
167 my ($oid, $len, $msgref) = @{$im->{last_object}};
168 if (do_idx($self, $msgref, $mime, $len, $num, $oid, $mid0)) {
176 my ($self, $mime, $mid0) = @_;
177 my $mids = mids($mime->header_obj);
179 my $mid = $mids->[0];
180 my $num = $self->{mm}->mid_insert($mid);
181 if (defined $num) { # common case
186 # crap, Message-ID is already known, hope somebody just resent:
187 foreach my $m (@$mids) {
188 # read-only lookup now safe to do after above barrier
189 my $existing = lookup_content($self, $mime, $m);
190 # easy, don't store duplicates
191 # note: do not add more diagnostic info here since
192 # it gets noisy on public-inbox-watch restarts
196 # AltId may pre-populate article numbers (e.g. X-Mail-Count
197 # or NNTP article number), use that article number if it's
199 my $altid = $self->{-inbox}->{altid};
200 if ($altid && grep(/:file=msgmap\.sqlite3\z/, @$altid)) {
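# the grep above matches an altid spec ending in ":file=msgmap.sqlite3",
# e.g. (illustrative) "serial:gmane:file=msgmap.sqlite3" in the inbox
# config; only a msgmap-backed altid can pre-assign article numbers here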
201 my $num = $self->{mm}->num_for($mid);
203 if (defined $num && !$self->{over}->get_art($num)) {
210 warn "<$mid> reused for mismatched content\n";
212 # try the rest of the mids
213 for (my $i = $#$mids; $i >= 1; $i--) {
215 $num = $self->{mm}->mid_insert($m);
217 warn "alternative <$m> for <$mid> found\n";
223 # none of the existing Message-IDs are good, generate a new one:
224 num_for_harder($self, $mime, $mid0);
228 my ($self, $mime, $mid0) = @_;
230 my $hdr = $mime->header_obj;
231 my $dig = content_digest($mime);
232 $$mid0 = PublicInbox::Import::digest2mid($dig, $hdr);
233 my $num = $self->{mm}->mid_insert($$mid0);
234 unless (defined $num) {
235 # it's hard to spoof the last Received: header
236 my @recvd = $hdr->header_raw('Received');
237 $dig->add("Received: $_") foreach (@recvd);
238 $$mid0 = PublicInbox::Import::digest2mid($dig, $hdr);
239 $num = $self->{mm}->mid_insert($$mid0);
241 # fall back to a random Message-ID and give up determinism:
242 until (defined($num)) {
244 $$mid0 = PublicInbox::Import::digest2mid($dig, $hdr);
245 warn "using random Message-ID <$$mid0> as fallback\n";
246 $num = $self->{mm}->mid_insert($$mid0);
249 PublicInbox::Import::append_mid($hdr, $$mid0);
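# fallback chain summary: 1) a Message-ID derived from the content digest,
# 2) the digest extended with the raw Received: headers, 3) a random
# Message-ID; whichever succeeds is appended to the header via append_mid
# so the chosen Message-ID travels with the stored message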
254 my ($self, $shard_i) = @_;
255 $self->{idx_shards}->[$shard_i];
260 my ($self, $opt) = @_;
261 return if $self->{idx_shards};
262 my $ibx = $self->{-inbox};
264 # do not leak read-only FDs to child processes; we only have these
265 # FDs for duplicate detection, so they should not be
266 # frequently activated.
267 delete $ibx->{$_} foreach (qw(git mm search));
269 my $indexlevel = $ibx->{indexlevel};
270 if ($indexlevel && $indexlevel eq 'basic') {
271 $self->{parallel} = 0;
274 if ($self->{parallel}) {
275 pipe(my ($r, $w)) or die "pipe failed: $!";
276 # pipe for barrier notifications doesn't need to be big,
278 fcntl($w, 1031, 4096) if $^O eq 'linux';
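# (1031 is F_SETPIPE_SZ on Linux; 4096 bytes is plenty, since the shards
# only write short "barrier <n>" notes back through this pipe)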
279 $self->{bnote} = [ $r, $w ];
283 my $over = $self->{over};
285 $ibx->with_umask(sub {
286 $self->lock_acquire unless ($opt && $opt->{-skip_lock});
289 # xcpdb can change shard count while -watch is idle
290 my $nshards = count_shards($self);
291 if ($nshards && $nshards != $self->{shards}) {
292 $self->{shards} = $nshards;
295 # need to create all shards before initializing msgmap FD
296 my $max = $self->{shards} - 1;
298 # idx_shards must be visible to all forked processes
299 my $idx = $self->{idx_shards} = [];
300 for my $i (0..$max) {
301 push @$idx, PublicInbox::SearchIdxShard->new($self, $i);
304 # Now that all subprocesses are up, we can open the FDs
306 my $mm = $self->{mm} = PublicInbox::Msgmap->new_file(
307 "$self->{-inbox}->{mainrepo}/msgmap.sqlite3", 1);
308 $mm->{dbh}->begin_work;
312 # returns an array mapping [ epoch => latest_commit ]
313 # latest_commit may be undef if nothing was done to that epoch
314 # $replace_map = { $object_id => $strref, ... }
315 sub _replace_oids ($$$) {
316 my ($self, $mime, $replace_map) = @_;
318 my $pfx = "$self->{-inbox}->{mainrepo}/git";
319 my $rewrites = []; # epoch => commit
320 my $max = $self->{epoch_max};
322 unless (defined($max)) {
323 defined(my $latest = git_dir_latest($self, \$max)) or return;
324 $self->{epoch_max} = $max;
327 foreach my $i (0..$max) {
328 my $git_dir = "$pfx/$i.git";
330 my $git = PublicInbox::Git->new($git_dir);
331 my $im = $self->import_init($git, 0, 1);
332 $rewrites->[$i] = $im->replace_oids($mime, $replace_map);
338 sub content_ids ($) {
340 my @cids = ( content_id($mime) );
342 # Email::MIME->as_string doesn't always round-trip, so we may
343 # use a second content_id
344 my $rt = content_id(PublicInbox::MIME->new(\($mime->as_string)));
345 push @cids, $rt if $cids[0] ne $rt;
349 sub content_matches ($$) {
350 my ($cids, $existing) = @_;
351 my $cid = content_id($existing);
353 return 1 if $_ eq $cid
358 # used for removing or replacing (purging)
359 sub rewrite_internal ($$;$$$) {
360 my ($self, $old_mime, $cmt_msg, $new_mime, $sref) = @_;
362 my ($im, $need_reindex, $replace_map);
364 $replace_map = {}; # oid => sref
365 $need_reindex = [] if $new_mime;
367 $im = $self->importer;
369 my $over = $self->{over};
370 my $cids = content_ids($old_mime);
372 my $mids = mids($old_mime->header_obj);
374 # We avoid introducing new blobs into git since the raw content
375 # can be slightly different, so we do not need the user-supplied
376 # message now that we have the mids and content_id
380 foreach my $mid (@$mids) {
381 my %gone; # num => [ smsg, raw ]
383 while (my $smsg = $over->next_by_mid($mid, \$id, \$prev)) {
384 my $msg = get_blob($self, $smsg);
385 if (!defined($msg)) {
386 warn "broken smsg for $mid\n";
390 my $cur = PublicInbox::MIME->new($msg);
391 if (content_matches($cids, $cur)) {
392 $smsg->{mime} = $cur;
393 $gone{$smsg->{num}} = [ $smsg, \$orig ];
396 my $n = scalar keys %gone;
399 warn "BUG: multiple articles linked to <$mid>\n",
400 join(',', sort keys %gone), "\n";
402 foreach my $num (keys %gone) {
403 my ($smsg, $orig) = @{$gone{$num}};
404 # $removed should only be set once assuming
405 # no bugs in our deduplication code:
407 my $oid = $smsg->{blob};
409 $replace_map->{$oid} = $sref;
411 ($mark, undef) = $im->remove($orig, $cmt_msg);
414 if ($need_reindex) { # ->replace
415 push @$need_reindex, $smsg;
416 } else { # ->purge or ->remove
417 $self->{mm}->num_delete($num);
419 unindex_oid_remote($self, $oid, $mid);
424 my $cmt = $im->get_mark($mark);
425 $self->{last_commit}->[$self->{epoch_max}] = $cmt;
427 if ($replace_map && scalar keys %$replace_map) {
428 my $rewrites = _replace_oids($self, $new_mime, $replace_map);
429 return { rewrites => $rewrites, need_reindex => $need_reindex };
436 my ($self, $mime, $cmt_msg) = @_;
437 $self->{-inbox}->with_umask(sub {
438 rewrite_internal($self, $mime, $cmt_msg);
442 sub _replace ($$;$$) {
443 my ($self, $old_mime, $new_mime, $sref) = @_;
444 my $rewritten = $self->{-inbox}->with_umask(sub {
445 rewrite_internal($self, $old_mime, undef, $new_mime, $sref);
448 my $rewrites = $rewritten->{rewrites};
449 # ->done is called if there are rewrites since we gc+prune from git
450 $self->idx_init if @$rewrites;
452 for my $i (0..$#$rewrites) {
453 defined(my $cmt = $rewrites->[$i]) or next;
454 $self->{last_commit}->[$i] = $cmt;
461 my ($self, $mime) = @_;
462 my $rewritten = _replace($self, $mime, undef, \'') or return;
463 $rewritten->{rewrites}
466 # returns the git object_id of $$raw; does not write the object to the FS
467 sub git_hash_raw ($$) {
468 my ($self, $raw) = @_;
469 # grab the expected OID we have to reindex:
470 open my $tmp_fh, '+>', undef or die "failed to open tmp: $!";
471 $tmp_fh->autoflush(1);
472 print $tmp_fh $$raw or die "print \$tmp_fh: $!";
473 sysseek($tmp_fh, 0, 0) or die "seek failed: $!";
476 pipe($r, $w) or die "failed to create pipe: $!";
477 my $rdr = { 0 => fileno($tmp_fh), 1 => fileno($w) };
478 my $git_dir = $self->{-inbox}->git->{git_dir};
479 my $cmd = ['git', "--git-dir=$git_dir", qw(hash-object --stdin)];
480 my $pid = spawn($cmd, undef, $rdr);
483 chomp(my $oid = <$r>);
484 waitpid($pid, 0) == $pid or die "git hash-object did not finish";
485 die "git hash-object failed: $?" if $?;
486 $oid =~ /\A[a-f0-9]{40}\z/ or die "OID not expected: $oid";
490 sub _check_mids_match ($$$) {
491 my ($old_list, $new_list, $hdrs) = @_;
492 my %old_mids = map { $_ => 1 } @$old_list;
493 my %new_mids = map { $_ => 1 } @$new_list;
494 my @old = keys %old_mids;
495 my @new = keys %new_mids;
496 my $err = "$hdrs may not be changed when replacing\n";
497 die $err if scalar(@old) != scalar(@new);
498 delete @new_mids{@old};
499 delete @old_mids{@new};
500 die $err if (scalar(keys %old_mids) || scalar(keys %new_mids));
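# e.g. old Message-IDs (<a@x>, <b@x>) vs. new (<b@x>, <a@x>) pass since the
# sets are equal (order is ignored), while (<a@x>) vs. (<b@x>) dies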
503 # Changing Message-IDs or References with ->replace isn't supported.
504 # The rules for dealing with messages with multiple or conflicting
505 # Message-IDs are pretty complex and rethreading hasn't been fully
507 sub check_mids_match ($$) {
508 my ($old_mime, $new_mime) = @_;
509 my $old = $old_mime->header_obj;
510 my $new = $new_mime->header_obj;
511 _check_mids_match(mids($old), mids($new), 'Message-ID(s)');
512 _check_mids_match(references($old), references($new),
513 'References/In-Reply-To');
518 my ($self, $old_mime, $new_mime) = @_;
520 check_mids_match($old_mime, $new_mime);
522 # mutt will always add Content-Length:, Status:, Lines: when editing
523 PublicInbox::Import::drop_unwanted_headers($new_mime);
525 my $raw = $new_mime->as_string;
526 my $expect_oid = git_hash_raw($self, \$raw);
527 my $rewritten = _replace($self, $old_mime, $new_mime, \$raw) or return;
528 my $need_reindex = $rewritten->{need_reindex};
530 # just in case we have bugs in deduplication code:
531 my $n = scalar(@$need_reindex);
533 my $list = join(', ', map {
534 "$_->{num}: <$_->{mid}>"
537 W: rewritten $n messages matching content of original message (expected: 1).
538 W: possible bug in public-inbox, NNTP article IDs and Message-IDs follow:
543 # make sure we really got the OID:
544 my ($oid, $type, $len) = $self->{-inbox}->git->check($expect_oid);
545 $oid eq $expect_oid or die "BUG: $expect_oid not found after replace";
547 # don't leak FDs to Xapian:
548 $self->{-inbox}->git->cleanup;
550 # reindex modified messages:
551 for my $smsg (@$need_reindex) {
552 my $num = $smsg->{num};
553 my $mid0 = $smsg->{mid};
554 do_idx($self, \$raw, $new_mime, $len, $num, $oid, $mid0);
556 $rewritten->{rewrites};
559 sub last_epoch_commit ($$;$) {
560 my ($self, $i, $cmt) = @_;
561 my $v = PublicInbox::Search::SCHEMA_VERSION();
562 $self->{mm}->last_commit_xap($v, $i, $cmt);
565 sub set_last_commits ($) {
567 defined(my $epoch_max = $self->{epoch_max}) or return;
568 my $last_commit = $self->{last_commit};
569 foreach my $i (0..$epoch_max) {
570 defined(my $cmt = $last_commit->[$i]) or next;
571 $last_commit->[$i] = undef;
572 last_epoch_commit($self, $i, $cmt);
578 $self->{bnote} or return;
580 my $barrier = { map { $_ => 1 } (0..$n) };
584 my ($self, $barrier) = @_;
585 my $bnote = $self->{bnote} or return;
587 while (scalar keys %$barrier) {
588 defined(my $l = $r->getline) or die "EOF on barrier_wait: $!";
589 $l =~ /\Abarrier (\d+)/ or die "bad line on barrier_wait: $l";
590 delete $barrier->{$1} or die "bad shard[$1] on barrier wait";
595 sub checkpoint ($;$) {
596 my ($self, $wait) = @_;
598 if (my $im = $self->{im}) {
605 my $shards = $self->{idx_shards};
607 my $dbh = $self->{mm}->{dbh};
609 # SQLite msgmap data is second in importance
612 # SQLite overview is third
613 $self->{over}->commit_lazy;
615 # Now deal with Xapian
617 my $barrier = $self->barrier_init(scalar @$shards);
619 # each shard needs to issue a barrier command
620 $_->remote_barrier for @$shards;
622 # wait for each Xapian shard
623 $self->barrier_wait($barrier);
625 $_->remote_commit for @$shards;
628 # last_commit is special, don't commit these until
629 # remote shards are done:
631 set_last_commits($self);
636 $self->{transact_bytes} = 0;
639 # issue a write barrier to ensure all data is visible to other processes
640 # and read-only ops. Order of data importance is: git > SQLite > Xapian
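# checkpoint() above follows that order: git-fast-import is checkpointed
# first, then the msgmap and over SQLite databases are committed, and only
# then are the Xapian shards barriered and committed, with last_commit
# recorded last so the index is unlikely to reference blobs git has not
# yet made durable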
642 sub barrier { checkpoint($_[0], 1) };
647 my $im = delete $self->{im};
648 $im->done if $im; # PublicInbox::Import::done
650 my $mm = delete $self->{mm};
651 $mm->{dbh}->commit if $mm;
652 my $shards = delete $self->{idx_shards};
654 $_->remote_close for @$shards;
656 $self->{over}->disconnect;
657 delete $self->{bnote};
658 $self->{transact_bytes} = 0;
659 $self->lock_release if $shards;
660 $self->{-inbox}->git->cleanup;
663 sub fill_alternates ($$) {
664 my ($self, $epoch) = @_;
666 my $pfx = "$self->{-inbox}->{mainrepo}/git";
667 my $all = "$self->{-inbox}->{mainrepo}/all.git";
670 PublicInbox::Import::init_bare($all);
672 @cmd = (qw/git config/, "--file=$pfx/$epoch.git/config",
673 'include.path', '../../all.git/config');
674 PublicInbox::Import::run_die(\@cmd);
676 my $alt = "$all/objects/info/alternates";
680 open(my $fh, '<', $alt) or die "open < $alt: $!\n";
681 %alts = map { chomp; $_ => 1 } (<$fh>);
683 foreach my $i (0..$epoch) {
684 my $dir = "../../git/$i.git/objects";
685 push @add, $dir if !$alts{$dir} && -d "$pfx/$i.git";
688 open my $fh, '>>', $alt or die "open >> $alt: $!\n";
689 foreach my $dir (@add) {
690 print $fh "$dir\n" or die "print >> $alt: $!\n";
692 close $fh or die "close $alt: $!\n";
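# all.git/objects/info/alternates ends up with one line per epoch, each
# relative to all.git/objects, e.g. (illustrative):
#   ../../git/0.git/objects
#   ../../git/1.git/objects
# so all.git can resolve every epoch's objects without duplicating storage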
696 my ($self, $epoch) = @_;
697 my $git_dir = "$self->{-inbox}->{mainrepo}/git/$epoch.git";
698 my @cmd = (qw(git init --bare -q), $git_dir);
699 PublicInbox::Import::run_die(\@cmd);
700 fill_alternates($self, $epoch);
705 my ($self, $max) = @_;
707 my $pfx = "$self->{-inbox}->{mainrepo}/git";
708 return unless -d $pfx;
710 opendir my $dh, $pfx or die "opendir $pfx: $!\n";
711 while (defined(my $git_dir = readdir($dh))) {
712 $git_dir =~ m!\A([0-9]+)\.git\z! or next;
715 $latest = "$pfx/$git_dir";
723 my $im = $self->{im};
725 if ($im->{bytes_added} < $self->{rotate_bytes}) {
732 my $git_dir = $self->git_init(++$self->{epoch_max});
733 my $git = PublicInbox::Git->new($git_dir);
734 return $self->import_init($git, 0);
739 my $latest = git_dir_latest($self, \$max);
740 if (defined $latest) {
741 my $git = PublicInbox::Git->new($latest);
742 my $packed_bytes = $git->packed_bytes;
743 my $unpacked_bytes = $packed_bytes / $PACKING_FACTOR;
745 if ($unpacked_bytes >= $self->{rotate_bytes}) {
748 $self->{epoch_max} = $max;
749 return $self->import_init($git, $packed_bytes);
752 $self->{epoch_max} = $epoch;
753 $latest = $self->git_init($epoch);
754 $self->import_init(PublicInbox::Git->new($latest), 0);
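# epoch rotation sketch: while the current epoch's estimated unpacked size
# (bytes_added for a live importer, or packed_bytes / PACKING_FACTOR for an
# existing repo) stays under rotate_bytes, keep appending to it; otherwise
# git_init() creates the next N.git epoch and importing continues there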
758 my ($self, $git, $packed_bytes, $tmp) = @_;
759 my $im = PublicInbox::Import->new($git, undef, undef, $self->{-inbox});
760 $im->{bytes_added} = int($packed_bytes / $PACKING_FACTOR);
761 $im->{want_object_info} = 1;
762 $im->{lock_path} = undef;
763 $im->{path_type} = 'v2';
764 $self->{im} = $im unless $tmp;
770 my ($mid, $cur, $new) = @_;
771 use File::Temp qw(tempfile);
773 my ($ah, $an) = tempfile('email-cur-XXXXXXXX', TMPDIR => 1);
774 print $ah $cur->as_string or die "print: $!";
775 close $ah or die "close: $!";
776 my ($bh, $bn) = tempfile('email-new-XXXXXXXX', TMPDIR => 1);
777 PublicInbox::Import::drop_unwanted_headers($new);
778 print $bh $new->as_string or die "print: $!";
779 close $bh or die "close: $!";
780 my $cmd = [ qw(diff -u), $an, $bn ];
781 print STDERR "# MID conflict <$mid>\n";
782 my $pid = spawn($cmd, undef, { 1 => 2 });
783 defined $pid or die "diff failed to spawn: $!";
784 waitpid($pid, 0) == $pid or die "diff did not finish";
789 my ($self, $smsg) = @_;
790 if (my $im = $self->{im}) {
791 my $msg = $im->cat_blob($smsg->{blob});
794 # older message, should be in alternates
795 my $ibx = $self->{-inbox};
796 $ibx->msg_by_smsg($smsg);
799 sub lookup_content ($$$) {
800 my ($self, $mime, $mid) = @_;
801 my $over = $self->{over};
802 my $cids = content_ids($mime);
804 while (my $smsg = $over->next_by_mid($mid, \$id, \$prev)) {
805 my $msg = get_blob($self, $smsg);
806 if (!defined($msg)) {
807 warn "broken smsg for $mid\n";
810 my $cur = PublicInbox::MIME->new($msg);
811 if (content_matches($cids, $cur)) {
812 $smsg->{mime} = $cur;
817 # XXX DEBUG_DIFF is experimental and may be removed
818 diff($mid, $cur, $mime) if $ENV{DEBUG_DIFF};
825 my $fh = delete $self->{reindex_pipe};
827 if (my $shards = $self->{idx_shards}) {
828 $_->atfork_child foreach @$shards;
830 if (my $im = $self->{im}) {
833 die "unexpected mm" if $self->{mm};
834 close $self->{bnote}->[0] or die "close bnote[0]: $!\n";
838 sub mark_deleted ($$$$) {
839 my ($self, $sync, $git, $oid) = @_;
840 my $msgref = $git->cat_file($oid);
841 my $mime = PublicInbox::MIME->new($$msgref);
842 my $mids = mids($mime->header_obj);
843 my $cid = content_id($mime);
844 foreach my $mid (@$mids) {
845 $sync->{D}->{"$mid\0$cid"} = $oid;
849 sub reindex_oid ($$$$) {
850 my ($self, $sync, $git, $oid) = @_;
852 my $msgref = $git->cat_file($oid, \$len);
853 my $mime = PublicInbox::MIME->new($$msgref);
854 my $mids = mids($mime->header_obj);
855 my $cid = content_id($mime);
857 # get the NNTP article number we used before, highest number wins
858 # and gets deleted from sync->{mm_tmp};
862 foreach my $mid (@$mids) {
863 $del += delete($sync->{D}->{"$mid\0$cid"}) ? 1 : 0;
864 my $n = $sync->{mm_tmp}->num_for($mid);
865 if (defined $n && $n > $num) {
868 $self->{mm}->mid_set($num, $mid0);
871 if (!defined($mid0) && !$del) {
872 $num = $sync->{regen}--;
873 die "BUG: ran out of article numbers\n" if $num <= 0;
874 my $mm = $self->{mm};
875 foreach my $mid (reverse @$mids) {
876 if ($mm->mid_set($num, $mid) == 1) {
881 if (!defined($mid0)) {
882 my $id = '<' . join('> <', @$mids) . '>';
883 warn "Message-ID $id unusable for $num\n";
884 foreach my $mid (@$mids) {
885 defined(my $n = $mm->num_for($mid)) or next;
886 warn "#$n previously mapped for <$mid>\n";
891 if (!defined($mid0) || $del) {
892 if (!defined($mid0) && $del) { # expected for deletes
893 $num = $sync->{regen}--;
894 $self->{mm}->num_highwater($num) if !$sync->{reindex};
898 my $id = '<' . join('> <', @$mids) . '>';
900 warn "Skipping $id, no article number found\n";
901 if ($del && defined($mid0)) {
902 warn "$id was deleted $del " .
903 "time(s) but mapped to article #$num\n";
908 $sync->{mm_tmp}->mid_delete($mid0) or
909 die "failed to delete <$mid0> for article #$num\n";
911 if (do_idx($self, $msgref, $mime, $len, $num, $oid, $mid0)) {
913 $sync->{mm_tmp}->atfork_prepare;
914 $self->done; # release lock
916 if (my $pr = $sync->{-opt}->{-progress}) {
917 my ($bn) = (split('/', $git->{git_dir}))[-1];
918 $pr->("$bn ".sprintf($sync->{-regen_fmt}, $sync->{nr}));
921 # allow -watch or -mda to write...
922 $self->idx_init; # reacquire lock
923 $sync->{mm_tmp}->atfork_parent;
927 # on reindex, only update last_commit for $i if it is newer than the current one
928 sub update_last_commit ($$$$) {
929 my ($self, $git, $i, $cmt) = @_;
930 my $last = last_epoch_commit($self, $i);
931 if (defined $last && is_ancestor($git, $last, $cmt)) {
932 my @cmd = (qw(rev-list --count), "$last..$cmt");
933 chomp(my $n = $git->qx(@cmd));
934 return if $n ne '' && $n == 0;
936 last_epoch_commit($self, $i, $cmt);
939 sub git_dir_n ($$) { "$_[0]->{-inbox}->{mainrepo}/git/$_[1].git" }
941 sub last_commits ($$) {
942 my ($self, $epoch_max) = @_;
944 for (my $i = $epoch_max; $i >= 0; $i--) {
945 $heads->[$i] = last_epoch_commit($self, $i);
950 *is_ancestor = *PublicInbox::SearchIdx::is_ancestor;
952 # returns a revision range for git-log(1)
953 sub log_range ($$$$$) {
954 my ($self, $sync, $git, $i, $tip) = @_;
955 my $opt = $sync->{-opt};
956 my $pr = (($opt->{verbose} || 0) > 1) ? $opt->{-progress} : undef;
957 my $cur = $sync->{ranges}->[$i] or do {
958 $pr->("$i.git indexing all of $tip") if $pr;
959 return $tip; # all of it
962 # fast equality check to avoid (v)fork+execve overhead
964 $sync->{ranges}->[$i] = undef;
968 my $range = "$cur..$tip";
969 $pr->("$i.git checking contiguity... ") if $pr;
970 if (is_ancestor($git, $cur, $tip)) { # common case
971 $pr->("OK\n") if $pr;
972 my $n = $git->qx(qw(rev-list --count), $range);
975 $sync->{ranges}->[$i] = undef;
976 $pr->("$i.git has nothing new\n") if $pr;
977 return; # nothing to do
979 $pr->("$i.git has $n changes since $cur\n") if $pr;
981 $pr->("FAIL\n") if $pr;
983 discontiguous range: $range
984 Rewritten history? (in $git->{git_dir})
986 chomp(my $base = $git->qx('merge-base', $tip, $cur));
988 $range = "$base..$tip";
989 warn "found merge-base: $base\n"
992 warn "discarding history at $cur\n";
995 reindexing $git->{git_dir} starting at
998 $sync->{unindex_range}->{$i} = "$base..$cur";
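# i.e. when the previously-indexed tip is no longer an ancestor of the new
# tip: reindex from the merge-base (when one exists) up to the new tip, and
# queue the abandoned commits ($base..$cur) for unindexing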
1003 sub sync_prepare ($$$) {
1004 my ($self, $sync, $epoch_max) = @_;
1005 my $pr = $sync->{-opt}->{-progress};
1007 my $head = $self->{-inbox}->{ref_head} || 'refs/heads/master';
1009 # reindex stops at the current heads and we later rerun index_sync
1011 my $reindex_heads = $sync->{reindex} ? last_commits($self, $epoch_max) : undef;
1013 for (my $i = $epoch_max; $i >= 0; $i--) {
1014 die 'BUG: already indexing!' if $self->{reindex_pipe};
1015 my $git_dir = git_dir_n($self, $i);
1016 -d $git_dir or next; # missing epochs are fine
1017 my $git = PublicInbox::Git->new($git_dir);
1018 if ($reindex_heads) {
1019 $head = $reindex_heads->[$i] or next;
1021 chomp(my $tip = $git->qx(qw(rev-parse -q --verify), $head));
1023 next if $?; # new repo
1024 my $range = log_range($self, $sync, $git, $i, $tip) or next;
1025 $sync->{ranges}->[$i] = $range;
1027 # can't use 'rev-list --count' if we use --diff-filter
1028 $pr->("$i.git counting $range ... ") if $pr;
1030 my $fh = $git->popen(qw(log --pretty=tformat:%H
1031 --no-notes --no-color --no-renames
1032 --diff-filter=AM), $range, '--', 'm');
1034 $pr->("$n\n") if $pr;
1038 return 0 if (!$regen_max && !keys(%{$sync->{unindex_range}}));
1040 # reindex should NOT see new commits anymore, if we do,
1041 # it's a problem and we need to notice it via die()
1042 my $pad = length($regen_max) + 1;
1043 $sync->{-regen_fmt} = "% ${pad}u/$regen_max\n";
1045 return -1 if $sync->{reindex};
1046 $regen_max + ($self->{mm}->num_highwater() || 0);
1049 sub unindex_oid_remote ($$$) {
1050 my ($self, $oid, $mid) = @_;
1051 $_->remote_remove($oid, $mid) foreach @{$self->{idx_shards}};
1052 $self->{over}->remove_oid($oid, $mid);
1055 sub unindex_oid ($$$) {
1056 my ($self, $git, $oid) = @_;
1057 my $msgref = $git->cat_file($oid);
1058 my $mime = PublicInbox::MIME->new($msgref);
1059 my $mids = mids($mime->header_obj);
1060 $mime = $msgref = undef;
1061 my $over = $self->{over};
1062 foreach my $mid (@$mids) {
1065 while (my $smsg = $over->next_by_mid($mid, \$id, \$prev)) {
1066 $gone{$smsg->{num}} = 1 if $oid eq $smsg->{blob};
1069 my $n = scalar keys %gone;
1072 warn "BUG: multiple articles linked to $oid\n",
1073 join(',',sort keys %gone), "\n";
1075 foreach my $num (keys %gone) {
1076 $self->{unindexed}->{$num}++;
1077 $self->{mm}->num_delete($num);
1079 unindex_oid_remote($self, $oid, $mid);
1083 my $x40 = qr/[a-f0-9]{40}/;
1084 sub unindex ($$$$) {
1085 my ($self, $sync, $git, $unindex_range) = @_;
1086 my $un = $self->{unindexed} ||= {}; # num => removal count
1087 my $before = scalar keys %$un;
1088 my @cmd = qw(log --raw -r
1089 --no-notes --no-color --no-abbrev --no-renames);
1090 my $fh = $self->{reindex_pipe} = $git->popen(@cmd, $unindex_range);
1092 /\A:\d{6} 100644 $x40 ($x40) [AM]\tm$/o or next;
1093 unindex_oid($self, $git, $1);
1095 delete $self->{reindex_pipe};
1098 return unless $sync->{-opt}->{prune};
1099 my $after = scalar keys %$un;
1100 return if $before == $after;
1102 # ensure any blob can no longer be accessed via dumb HTTP
1103 PublicInbox::Import::run_die(['git', "--git-dir=$git->{git_dir}",
1104 qw(-c gc.reflogExpire=now gc --prune=all)]);
1107 sub sync_ranges ($$$) {
1108 my ($self, $sync, $epoch_max) = @_;
1109 my $reindex = $sync->{reindex};
1111 return last_commits($self, $epoch_max) unless $reindex;
1112 return [] if ref($reindex) ne 'HASH';
1114 my $ranges = $reindex->{from}; # arrayref;
1115 if (ref($ranges) ne 'ARRAY') {
1116 die 'BUG: $reindex->{from} not an ARRAY';
1121 sub index_epoch ($$$) {
1122 my ($self, $sync, $i) = @_;
1124 my $git_dir = git_dir_n($self, $i);
1125 die 'BUG: already reindexing!' if $self->{reindex_pipe};
1126 -d $git_dir or return; # missing epochs are fine
1127 fill_alternates($self, $i);
1128 my $git = PublicInbox::Git->new($git_dir);
1129 if (my $unindex_range = delete $sync->{unindex_range}->{$i}) {
1130 unindex($self, $sync, $git, $unindex_range);
1132 defined(my $range = $sync->{ranges}->[$i]) or return;
1133 if (my $pr = $sync->{-opt}->{-progress}) {
1134 $pr->("$i.git indexing $range\n");
1137 my @cmd = qw(log --raw -r --pretty=tformat:%H
1138 --no-notes --no-color --no-abbrev --no-renames);
1139 my $fh = $self->{reindex_pipe} = $git->popen(@cmd, $range);
1143 $self->{current_info} = "$i.git $_";
1144 if (/\A$x40$/o && !defined($cmt)) {
1146 } elsif (/\A:\d{6} 100644 $x40 ($x40) [AM]\tm$/o) {
1147 reindex_oid($self, $sync, $git, $1);
1148 } elsif (/\A:\d{6} 100644 $x40 ($x40) [AM]\td$/o) {
1149 mark_deleted($self, $sync, $git, $1);
1153 delete $self->{reindex_pipe};
1154 update_last_commit($self, $git, $i, $cmt) if defined $cmt;
1157 # public, called by public-inbox-index
1159 my ($self, $opt) = @_;
1161 my $pr = $opt->{-progress};
1163 my $latest = git_dir_latest($self, \$epoch_max);
1164 return unless defined $latest;
1165 $self->idx_init($opt); # acquire lock
1167 D => {}, # "$mid\0$cid" => $oid
1168 unindex_range => {}, # EPOCH => oid_old..oid_new
1169 reindex => $opt->{reindex},
1172 $sync->{ranges} = sync_ranges($self, $sync, $epoch_max);
1173 $sync->{regen} = sync_prepare($self, $sync, $epoch_max);
1175 if ($sync->{regen}) {
1176 # tmp_clone seems to fail if inside a transaction, so
1177 # we rollback here (because we opened {mm} for reading)
1178 # Note: we do NOT rely on DBI transactions for atomicity;
1179 # only for batch performance.
1180 $self->{mm}->{dbh}->rollback;
1181 $self->{mm}->{dbh}->begin_work;
1182 $sync->{mm_tmp} = $self->{mm}->tmp_clone;
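# mm_tmp is a temporary copy of the msgmap used to recover previously
# assigned article numbers during regeneration; entries are deleted from it
# as messages are re-seen (see reindex_oid above)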
1185 # work backwards through history
1186 for (my $i = $epoch_max; $i >= 0; $i--) {
1187 index_epoch($self, $sync, $i);
1190 # unindex is required for leftovers if "deletes" affect messages
1191 # in a previous fetch+index window:
1192 if (my @leftovers = values %{delete $sync->{D}}) {
1193 my $git = $self->{-inbox}->git;
1194 unindex_oid($self, $git, $_) for @leftovers;
1199 if (my $nr = $sync->{nr}) {
1200 my $pr = $sync->{-opt}->{-progress};
1201 $pr->('all.git '.sprintf($sync->{-regen_fmt}, $nr)) if $pr;
1204 # reindex does not pick up new changes, so we rerun w/o it:
1205 if ($opt->{reindex}) {
1208 delete @again{qw(reindex -skip_lock)};
1209 index_sync($self, \%again);
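# (the reindex pass pinned its endpoints to the heads recorded when it
# started, so a second, non-reindex pass picks up anything imported while
# the reindex was running)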