1 # Copyright (C) 2018 all contributors <meta@public-inbox.org>
2 # License: AGPL-3.0+ <https://www.gnu.org/licenses/agpl-3.0.txt>
4 # This interface wraps and mimics PublicInbox::Import
5 # Used to write to V2 inboxes (see L<public-inbox-v2-format(5)>).
6 package PublicInbox::V2Writable;
9 use base qw(PublicInbox::Lock);
10 use PublicInbox::SearchIdxPart;
11 use PublicInbox::MIME;
13 use PublicInbox::Import;
14 use PublicInbox::MID qw(mids);
15 use PublicInbox::ContentId qw(content_id content_digest);
16 use PublicInbox::Inbox;
17 use PublicInbox::OverIdx;
18 use PublicInbox::Msgmap;
19 use PublicInbox::Spawn qw(spawn);
20 use PublicInbox::SearchIdx;
# NOTE(review): this listing is elided; some original lines (including the
# nproc_parts sub header and closing braces) are not shown here.
23 # an estimate of the post-packed size to the raw uncompressed size
24 my $PACKING_FACTOR = 0.4;
26 # assume 2 cores if GNU nproc(1) is not available
# fragment of nproc_parts: honor an explicit {nproc} in the creat hashref,
# otherwise fall back to $NPROC, nproc(1), or 2
29 if (ref($creat_opt) eq 'HASH') {
30 if (defined(my $n = $creat_opt->{nproc})) {
35 my $n = int($ENV{NPROC} || `nproc 2>/dev/null` || 2);
36 # subtract for the main process and git-fast-import
# count existing Xapian partition directories (numeric subdirs of {xpfx})
# so a change in core count does not change an existing partition layout
41 sub count_partitions ($) {
44 my $xpfx = $self->{xpfx};
46 # always load existing partitions in case core count changes:
47 # Also, partition count may change while -watch is running
50 foreach my $part (<$xpfx/*>) {
51 -d $part && $part =~ m!/[0-9]+\z! or next;
# opening the Xapian DB validates the directory really is a partition
53 Search::Xapian::Database->new($part)->close;
# NOTE(review): elided listing; the sub headers for new/init_inbox/add and
# several body lines are not shown.
# Constructor fragment: builds the writer around a writable v2 inbox,
# creating {mainrepo} when $creat is true.
62 # $creat may be any true value, or 0/undef. A hashref is true,
63 # and $creat->{nproc} may be set to an integer
64 my ($class, $v2ibx, $creat) = @_;
65 my $dir = $v2ibx->{mainrepo} or die "no mainrepo in inbox\n";
69 File::Path::mkpath($dir);
71 die "$dir does not exist\n";
75 $v2ibx = PublicInbox::InboxWritable->new($v2ibx);
76 $v2ibx->umask_prepare;
# Xapian directory prefix is versioned by the index schema
78 my $xpfx = "$dir/xap" . PublicInbox::Search::SCHEMA_VERSION;
81 im => undef, # PublicInbox::Import
86 over => PublicInbox::OverIdx->new("$xpfx/over.sqlite3", 1),
87 lock_path => "$dir/inbox.lock",
88 # limit each git repo (epoch) to 1GB or so
89 rotate_bytes => int((1024 * 1024 * 1024) / $PACKING_FACTOR),
90 last_commit => [], # git repo -> commit
# prefer the partition count already on disk over the CPU-derived one
92 $self->{partitions} = count_partitions($self) || nproc_parts($creat);
# init_inbox fragment: set parallelism and create epoch 0 (or the
# requested skip epoch) if no git epoch exists yet
98 my ($self, $parallel, $skip_epoch) = @_;
99 $self->{parallel} = $parallel;
102 git_dir_latest($self, \$epoch_max);
103 if (defined $skip_epoch && $epoch_max == -1) {
104 $epoch_max = $skip_epoch;
106 $self->git_init($epoch_max >= 0 ? $epoch_max : 0);
110 # returns undef on duplicate or spam
111 # mimics Import::add and wraps it for v2
113 my ($self, $mime, $check_cb) = @_;
114 $self->{-inbox}->with_umask(sub {
115 _add($self, $mime, $check_cb)
119 # indexes a message, returns true if checkpointing is needed
# Writes the overview (SQLite) row, then hands the raw message to the
# Xapian partition selected by article number modulo partition count.
# The return value is the checkpoint signal: true once the bytes added
# in this transaction reach BATCH_BYTES per partition.
120 sub do_idx ($$$$$$$) {
121 my ($self, $msgref, $mime, $len, $num, $oid, $mid0) = @_;
122 $self->{over}->add_overview($mime, $len, $num, $oid, $mid0);
123 my $npart = $self->{partitions};
124 my $part = $num % $npart;
125 my $idx = idx_part($self, $part);
126 $idx->index_raw($len, $msgref, $num, $oid, $mid0, $mime);
127 my $n = $self->{transact_bytes} += $len;
128 $n >= (PublicInbox::SearchIdx::BATCH_BYTES * $npart);
# _add fragment (sub header and some lines elided): the umask-protected
# body of add().  Returns early (undef) when the check callback rejects
# the message or the Message-ID is a duplicate.
132 my ($self, $mime, $check_cb) = @_;
136 $mime = $check_cb->($mime) or return;
139 # All pipes (> $^F) known to Perl 5.6+ have FD_CLOEXEC set,
140 # as does SQLite 3.4.1+ (released in 2007-07-20), and
141 # Xapian 1.3.2+ (released 2015-03-15).
142 # For the most part, we can spawn git-fast-import without
143 # leaking FDs to it...
147 my $num = num_for($self, $mime, \$mid0);
148 defined $num or return; # duplicate
149 defined $mid0 or die "BUG: $mid0 undefined\n";
150 my $im = $self->importer;
151 my $cmt = $im->add($mime);
152 $cmt = $im->get_mark($cmt);
# remember the tip of the current epoch for set_last_commits
153 $self->{last_commit}->[$self->{epoch_max}] = $cmt;
155 my ($oid, $len, $msgref) = @{$im->{last_object}};
# do_idx returning true means a checkpoint is due
156 if (do_idx($self, $msgref, $mime, $len, $num, $oid, $mid0)) {
# num_for fragment (sub header and some lines elided): allocate an NNTP
# article number for $mime, writing the chosen Message-ID through $mid0.
# Returns undef for true duplicates.
164 my ($self, $mime, $mid0) = @_;
165 my $mids = mids($mime->header_obj);
167 my $mid = $mids->[0];
168 my $num = $self->{mm}->mid_insert($mid);
169 if (defined $num) { # common case
174 # crap, Message-ID is already known, hope somebody just resent:
175 foreach my $m (@$mids) {
176 # read-only lookup now safe to do after above barrier
177 my $existing = lookup_content($self, $mime, $m);
178 # easy, don't store duplicates
179 # note: do not add more diagnostic info here since
180 # it gets noisy on public-inbox-watch restarts
184 # AltId may pre-populate article numbers (e.g. X-Mail-Count
185 # or NNTP article number), use that article number if it's
187 my $altid = $self->{-inbox}->{altid};
188 if ($altid && grep(/:file=msgmap\.sqlite3\z/, @$altid)) {
189 my $num = $self->{mm}->num_for($mid);
191 if (defined $num && !$self->{over}->get_art($num)) {
198 warn "<$mid> reused for mismatched content\n";
200 # try the rest of the mids
201 for(my $i = $#$mids; $i >= 1; $i--) {
203 $num = $self->{mm}->mid_insert($m);
205 warn "alternative <$m> for <$mid> found\n";
211 # none of the existing Message-IDs are good, generate a new one:
212 num_for_harder($self, $mime, $mid0);
# num_for_harder fragment: derive a deterministic Message-ID from the
# content digest, then from digest+Received headers, then fall back to
# random IDs until msgmap accepts one.
216 my ($self, $mime, $mid0) = @_;
218 my $hdr = $mime->header_obj;
219 my $dig = content_digest($mime);
220 $$mid0 = PublicInbox::Import::digest2mid($dig, $hdr);
221 my $num = $self->{mm}->mid_insert($$mid0);
222 unless (defined $num) {
223 # it's hard to spoof the last Received: header
224 my @recvd = $hdr->header_raw('Received');
225 $dig->add("Received: $_") foreach (@recvd);
226 $$mid0 = PublicInbox::Import::digest2mid($dig, $hdr);
227 $num = $self->{mm}->mid_insert($$mid0);
229 # fall back to a random Message-ID and give up determinism:
230 until (defined($num)) {
232 $$mid0 = PublicInbox::Import::digest2mid($dig, $hdr);
233 warn "using random Message-ID <$$mid0> as fallback\n";
234 $num = $self->{mm}->mid_insert($$mid0);
# record the generated Message-ID in the stored header
237 PublicInbox::Import::append_mid($hdr, $$mid0);
# idx_part fragment: accessor for the Xapian partition object at index $part
242 my ($self, $part) = @_;
243 $self->{idx_parts}->[$part];
# idx_init fragment (sub header and some lines elided): bring up the
# msgmap DB, barrier pipe, and all Xapian partition subprocesses under
# the inbox lock.  Idempotent: returns early if partitions exist.
248 my ($self, $opt) = @_;
249 return if $self->{idx_parts};
250 my $ibx = $self->{-inbox};
252 # do not leak read-only FDs to child processes, we only have these
253 # FDs for duplicate detection so they should not be
254 # frequently activated.
255 delete $ibx->{$_} foreach (qw(git mm search));
257 my $indexlevel = $ibx->{indexlevel};
258 if ($indexlevel && $indexlevel eq 'basic') {
259 $self->{parallel} = 0;
262 if ($self->{parallel}) {
263 pipe(my ($r, $w)) or die "pipe failed: $!";
264 # pipe for barrier notifications doesn't need to be big,
# 1031 is F_SETPIPE_SZ on Linux; shrink the pipe to a single page
266 fcntl($w, 1031, 4096) if $^O eq 'linux';
267 $self->{bnote} = [ $r, $w ];
271 my $over = $self->{over};
273 $ibx->with_umask(sub {
274 $self->lock_acquire unless ($opt && $opt->{-skip_lock});
277 # -compact can change partition count while -watch is idle
278 my $nparts = count_partitions($self);
279 if ($nparts && $nparts != $self->{partitions}) {
280 $self->{partitions} = $nparts;
283 # need to create all parts before initializing msgmap FD
284 my $max = $self->{partitions} - 1;
286 # idx_parts must be visible to all forked processes
287 my $idx = $self->{idx_parts} = [];
288 for my $i (0..$max) {
289 push @$idx, PublicInbox::SearchIdxPart->new($self, $i);
292 # Now that all subprocesses are up, we can open the FDs
294 my $mm = $self->{mm} = PublicInbox::Msgmap->new_file(
295 "$self->{-inbox}->{mainrepo}/msgmap.sqlite3", 1);
296 $mm->{dbh}->begin_work;
# Replace the given object IDs with empty content in every git epoch,
# returning (presumably, via elided lines) the per-epoch rewrite commits.
300 sub purge_oids ($$) {
301 my ($self, $purge) = @_; # $purge = { $object_id => \'', ... }
303 my $pfx = "$self->{-inbox}->{mainrepo}/git";
305 my $max = $self->{epoch_max};
307 unless (defined($max)) {
308 defined(my $latest = git_dir_latest($self, \$max)) or return;
309 $self->{epoch_max} = $max;
311 foreach my $i (0..$max) {
312 my $git_dir = "$pfx/$i.git";
314 my $git = PublicInbox::Git->new($git_dir);
# third arg (tmp) avoids installing this importer as $self->{im}
315 my $im = $self->import_init($git, 0, 1);
316 $purges->[$i] = $im->replace_oids($purge);
# content_ids fragment: one or two content IDs for duplicate detection
322 sub content_ids ($) {
324 my @cids = ( content_id($mime) );
326 # Email::MIME->as_string doesn't always round-trip, so we may
327 # use a second content_id
328 my $rt = content_id(PublicInbox::MIME->new(\($mime->as_string)));
329 push @cids, $rt if $cids[0] ne $rt;
# content_matches fragment: true if $existing matches any candidate cid
333 sub content_matches ($$) {
334 my ($cids, $existing) = @_;
335 my $cid = content_id($existing);
337 return 1 if $_ eq $cid
# Shared removal logic for remove() and purge().  With $purge set,
# object IDs are collected for history rewriting instead of being
# removed via fast-import.
342 sub remove_internal ($$$$) {
343 my ($self, $mime, $cmt_msg, $purge) = @_;
345 my $im = $self->importer unless $purge;
346 my $over = $self->{over};
347 my $cids = content_ids($mime);
348 my $parts = $self->{idx_parts};
349 my $mm = $self->{mm};
351 my $mids = mids($mime->header_obj);
353 # We avoid introducing new blobs into git since the raw content
354 # can be slightly different, so we do not need the user-supplied
355 # message now that we have the mids and content_id
359 foreach my $mid (@$mids) {
362 while (my $smsg = $over->next_by_mid($mid, \$id, \$prev)) {
363 my $msg = get_blob($self, $smsg);
364 if (!defined($msg)) {
365 warn "broken smsg for $mid\n";
369 my $cur = PublicInbox::MIME->new($msg);
370 if (content_matches($cids, $cur)) {
371 $smsg->{mime} = $cur;
372 $gone{$smsg->{num}} = [ $smsg, \$orig ];
375 my $n = scalar keys %gone;
378 warn "BUG: multiple articles linked to <$mid>\n",
379 join(',', sort keys %gone), "\n";
381 foreach my $num (keys %gone) {
382 my ($smsg, $orig) = @{$gone{$num}};
383 $mm->num_delete($num);
384 # $removed should only be set once assuming
385 # no bugs in our deduplication code:
387 my $oid = $smsg->{blob};
389 $purge->{$oid} = \'';
391 ($mark, undef) = $im->remove($orig, $cmt_msg);
394 unindex_oid_remote($self, $oid, $mid);
399 my $cmt = $im->get_mark($mark);
400 $self->{last_commit}->[$self->{epoch_max}] = $cmt;
402 if ($purge && scalar keys %$purge) {
403 return purge_oids($self, $purge);
# remove fragment: public wrapper, removal under the inbox umask
410 my ($self, $mime, $cmt_msg) = @_;
411 $self->{-inbox}->with_umask(sub {
412 remove_internal($self, $mime, $cmt_msg, undef);
# purge fragment: rewrite history to drop matching blobs, then record
# the new per-epoch tip commits
418 my ($self, $mime) = @_;
419 my $purges = $self->{-inbox}->with_umask(sub {
420 remove_internal($self, $mime, undef, {});
422 $self->idx_init if @$purges; # ->done is called on purges
423 for my $i (0..$#$purges) {
424 defined(my $cmt = $purges->[$i]) or next;
425 $self->{last_commit}->[$i] = $cmt;
# get/set the last-indexed commit for epoch $i in msgmap, keyed by the
# Xapian schema version
430 sub last_commit_part ($$;$) {
431 my ($self, $i, $cmt) = @_;
432 my $v = PublicInbox::Search::SCHEMA_VERSION();
433 $self->{mm}->last_commit_xap($v, $i, $cmt);
# flush the cached {last_commit} array into msgmap, clearing entries
436 sub set_last_commits ($) {
438 defined(my $epoch_max = $self->{epoch_max}) or return;
439 my $last_commit = $self->{last_commit};
440 foreach my $i (0..$epoch_max) {
441 defined(my $cmt = $last_commit->[$i]) or next;
442 $last_commit->[$i] = undef;
443 last_commit_part($self, $i, $cmt);
# barrier_init fragment: map of partition numbers awaiting notification
449 $self->{bnote} or return;
451 my $barrier = { map { $_ => 1 } (0..$n) };
# barrier_wait fragment: read "barrier N" lines from the notification
# pipe until every partition has reported in
455 my ($self, $barrier) = @_;
456 my $bnote = $self->{bnote} or return;
458 while (scalar keys %$barrier) {
459 defined(my $l = $r->getline) or die "EOF on barrier_wait: $!";
460 $l =~ /\Abarrier (\d+)/ or die "bad line on barrier_wait: $l";
461 delete $barrier->{$1} or die "bad part[$1] on barrier wait";
# Flush pending data in importance order (git, then SQLite, then
# Xapian); with $wait, use barriers so Xapian partitions are known to
# have caught up before last_commit state is persisted.
466 sub checkpoint ($;$) {
467 my ($self, $wait) = @_;
469 if (my $im = $self->{im}) {
476 my $parts = $self->{idx_parts};
478 my $dbh = $self->{mm}->{dbh};
480 # SQLite msgmap data is second in importance
483 # SQLite overview is third
484 $self->{over}->commit_lazy;
486 # Now deal with Xapian
488 my $barrier = $self->barrier_init(scalar @$parts);
490 # each partition needs to issue a barrier command
491 $_->remote_barrier for @$parts;
493 # wait for each Xapian partition
494 $self->barrier_wait($barrier);
496 $_->remote_commit for @$parts;
499 # last_commit is special, don't commit these until
500 # remote partitions are done:
502 set_last_commits($self);
# reset the byte counter that do_idx uses to request checkpoints
507 $self->{transact_bytes} = 0;
510 # issue a write barrier to ensure all data is visible to other processes
511 # and read-only ops. Order of data importance is: git > SQLite > Xapian
513 sub barrier { checkpoint($_[0], 1) };
# done fragment (sub header elided): final commit and teardown — close
# the importer, commit msgmap, shut down Xapian partitions, release the
# inbox lock and clean up cached git processes.
518 my $im = delete $self->{im};
519 $im->done if $im; # PublicInbox::Import::done
521 my $mm = delete $self->{mm};
522 $mm->{dbh}->commit if $mm;
523 my $parts = delete $self->{idx_parts};
525 $_->remote_close for @$parts;
527 $self->{over}->disconnect;
528 delete $self->{bnote};
529 $self->{transact_bytes} = 0;
# only release the lock if idx_init had acquired it (parts existed)
530 $self->lock_release if $parts;
531 $self->{-inbox}->git->cleanup;
# Keep all.git's objects/info/alternates pointing at every epoch's
# object store, and make each epoch's config include all.git/config.
534 sub fill_alternates ($$) {
535 my ($self, $epoch) = @_;
537 my $pfx = "$self->{-inbox}->{mainrepo}/git";
538 my $all = "$self->{-inbox}->{mainrepo}/all.git";
541 PublicInbox::Import::init_bare($all);
543 @cmd = (qw/git config/, "--file=$pfx/$epoch.git/config",
544 'include.path', '../../all.git/config');
545 PublicInbox::Import::run_die(\@cmd);
547 my $alt = "$all/objects/info/alternates";
551 open(my $fh, '<', $alt) or die "open < $alt: $!\n";
# remember existing entries so we only append missing epochs
552 %alts = map { chomp; $_ => 1 } (<$fh>);
554 foreach my $i (0..$epoch) {
555 my $dir = "../../git/$i.git/objects";
556 push @add, $dir if !$alts{$dir} && -d "$pfx/$i.git";
559 open my $fh, '>>', $alt or die "open >> $alt: $!\n";
560 foreach my $dir (@add) {
561 print $fh "$dir\n" or die "print >> $alt: $!\n";
563 close $fh or die "close $alt: $!\n";
# git_init fragment: create the bare repo for $epoch and wire alternates
567 my ($self, $epoch) = @_;
568 my $git_dir = "$self->{-inbox}->{mainrepo}/git/$epoch.git";
569 my @cmd = (qw(git init --bare -q), $git_dir);
570 PublicInbox::Import::run_die(\@cmd);
571 fill_alternates($self, $epoch);
# git_dir_latest fragment: scan <mainrepo>/git for the highest-numbered
# N.git directory, writing the epoch number through the $max ref
576 my ($self, $max) = @_;
578 my $pfx = "$self->{-inbox}->{mainrepo}/git";
579 return unless -d $pfx;
581 opendir my $dh, $pfx or die "opendir $pfx: $!\n";
582 while (defined(my $git_dir = readdir($dh))) {
583 $git_dir =~ m!\A([0-9]+)\.git\z! or next;
586 $latest = "$pfx/$git_dir";
594 my $im = $self->{im};
596 if ($im->{bytes_added} < $self->{rotate_bytes}) {
603 my $git_dir = $self->git_init(++$self->{epoch_max});
604 my $git = PublicInbox::Git->new($git_dir);
605 return $self->import_init($git, 0);
610 my $latest = git_dir_latest($self, \$max);
611 if (defined $latest) {
612 my $git = PublicInbox::Git->new($latest);
613 my $packed_bytes = $git->packed_bytes;
614 my $unpacked_bytes = $packed_bytes / $PACKING_FACTOR;
616 if ($unpacked_bytes >= $self->{rotate_bytes}) {
619 $self->{epoch_max} = $max;
620 return $self->import_init($git, $packed_bytes);
623 $self->{epoch_max} = $epoch;
624 $latest = $self->git_init($epoch);
625 $self->import_init(PublicInbox::Git->new($latest), 0);
629 my ($self, $git, $packed_bytes, $tmp) = @_;
630 my $im = PublicInbox::Import->new($git, undef, undef, $self->{-inbox});
631 $im->{bytes_added} = int($packed_bytes / $PACKING_FACTOR);
632 $im->{want_object_info} = 1;
633 $im->{lock_path} = undef;
634 $im->{path_type} = 'v2';
635 $self->{im} = $im unless $tmp;
# diff fragment (sub header elided): debugging aid — write the current
# and new versions of a Message-ID-conflicting message to temp files and
# show diff(1) output on stderr (stdout redirected to fd 2).
641 my ($mid, $cur, $new) = @_;
642 use File::Temp qw(tempfile);
644 my ($ah, $an) = tempfile('email-cur-XXXXXXXX', TMPDIR => 1);
645 print $ah $cur->as_string or die "print: $!";
646 close $ah or die "close: $!";
647 my ($bh, $bn) = tempfile('email-new-XXXXXXXX', TMPDIR => 1);
648 PublicInbox::Import::drop_unwanted_headers($new);
649 print $bh $new->as_string or die "print: $!";
650 close $bh or die "close: $!";
651 my $cmd = [ qw(diff -u), $an, $bn ];
652 print STDERR "# MID conflict <$mid>\n";
653 my $pid = spawn($cmd, undef, { 1 => 2 });
654 defined $pid or die "diff failed to spawn $!";
655 waitpid($pid, 0) == $pid or die "diff did not finish";
# get_blob fragment: fetch a message blob via the live fast-import
# process if available, else through the inbox's git alternates
660 my ($self, $smsg) = @_;
661 if (my $im = $self->{im}) {
662 my $msg = $im->cat_blob($smsg->{blob});
665 # older message, should be in alternates
666 my $ibx = $self->{-inbox};
667 $ibx->msg_by_smsg($smsg);
# Find an existing message with $mid whose content matches $mime;
# returns (via elided lines, presumably) the matching smsg or undef.
670 sub lookup_content ($$$) {
671 my ($self, $mime, $mid) = @_;
672 my $over = $self->{over};
673 my $cids = content_ids($mime);
675 while (my $smsg = $over->next_by_mid($mid, \$id, \$prev)) {
676 my $msg = get_blob($self, $smsg);
677 if (!defined($msg)) {
678 warn "broken smsg for $mid\n";
681 my $cur = PublicInbox::MIME->new($msg);
682 if (content_matches($cids, $cur)) {
683 $smsg->{mime} = $cur;
688 # XXX DEBUG_DIFF is experimental and may be removed
689 diff($mid, $cur, $mime) if $ENV{DEBUG_DIFF};
# atfork_child fragment: per-child cleanup after fork — drop inherited
# pipes/handles that belong to the parent
696 my $fh = delete $self->{reindex_pipe};
698 if (my $parts = $self->{idx_parts}) {
699 $_->atfork_child foreach @$parts;
701 if (my $im = $self->{im}) {
704 die "unexpected mm" if $self->{mm};
705 close $self->{bnote}->[0] or die "close bnote[0]: $!\n";
# Record a delete ("d" file) seen during history walk: remember the
# deleted blob keyed by "Message-ID\0content-id" in $sync->{D} so a
# later reindex of the same message can recognize it was removed.
709 sub mark_deleted ($$$$) {
710 my ($self, $sync, $git, $oid) = @_;
711 my $msgref = $git->cat_file($oid);
712 my $mime = PublicInbox::MIME->new($$msgref);
713 my $mids = mids($mime->header_obj);
714 my $cid = content_id($mime);
715 foreach my $mid (@$mids) {
716 $sync->{D}->{"$mid\0$cid"} = $oid;
# Reindex a single message blob during index_sync: recover or allocate
# its article number, skip it if it was deleted, then index and
# checkpoint (releasing and reacquiring the lock) when do_idx asks.
# NOTE(review): several lines are elided from this listing; the numbering
# logic below is order-sensitive — do not restructure without full source.
720 sub reindex_oid ($$$$) {
721 my ($self, $sync, $git, $oid) = @_;
723 my $msgref = $git->cat_file($oid, \$len);
724 my $mime = PublicInbox::MIME->new($$msgref);
725 my $mids = mids($mime->header_obj);
726 my $cid = content_id($mime);
728 # get the NNTP article number we used before, highest number wins
729 # and gets deleted from sync->{mm_tmp};
733 foreach my $mid (@$mids) {
734 $del += delete($sync->{D}->{"$mid\0$cid"}) ? 1 : 0;
735 my $n = $sync->{mm_tmp}->num_for($mid);
736 if (defined $n && $n > $num) {
739 $self->{mm}->mid_set($num, $mid0);
742 if (!defined($mid0) && !$del) {
743 $num = $sync->{regen}--;
744 die "BUG: ran out of article numbers\n" if $num <= 0;
745 my $mm = $self->{mm};
746 foreach my $mid (reverse @$mids) {
747 if ($mm->mid_set($num, $mid) == 1) {
752 if (!defined($mid0)) {
753 my $id = '<' . join('> <', @$mids) . '>';
754 warn "Message-ID $id unusable for $num\n";
755 foreach my $mid (@$mids) {
756 defined(my $n = $mm->num_for($mid)) or next;
757 warn "#$n previously mapped for <$mid>\n";
762 if (!defined($mid0) || $del) {
763 if (!defined($mid0) && $del) { # expected for deletes
764 $num = $sync->{regen}--;
765 $self->{mm}->num_highwater($num) if !$sync->{reindex};
769 my $id = '<' . join('> <', @$mids) . '>';
771 warn "Skipping $id, no article number found\n";
772 if ($del && defined($mid0)) {
773 warn "$id was deleted $del " .
774 "time(s) but mapped to article #$num\n";
779 $sync->{mm_tmp}->mid_delete($mid0) or
780 die "failed to delete <$mid0> for article #$num\n";
782 if (do_idx($self, $msgref, $mime, $len, $num, $oid, $mid0)) {
784 $sync->{mm_tmp}->atfork_prepare;
785 $self->done; # release lock
787 if (my $pr = $sync->{-opt}->{-progress}) {
788 my ($bn) = (split('/', $git->{git_dir}))[-1];
789 $pr->("$bn ".sprintf($sync->{-regen_fmt}, $sync->{nr}));
792 # allow -watch or -mda to write...
793 $self->idx_init; # reacquire lock
794 $sync->{mm_tmp}->atfork_parent;
798 # only update last_commit for $i on reindex iff newer than current
799 sub update_last_commit ($$$$) {
800 my ($self, $git, $i, $cmt) = @_;
801 my $last = last_commit_part($self, $i);
# skip the write when $cmt adds nothing on top of the stored $last
802 if (defined $last && is_ancestor($git, $last, $cmt)) {
803 my @cmd = (qw(rev-list --count), "$last..$cmt");
804 chomp(my $n = $git->qx(@cmd));
805 return if $n ne '' && $n == 0;
807 last_commit_part($self, $i, $cmt);
# Path of the git repository for epoch $n: <mainrepo>/git/<n>.git
sub git_dir_n ($$) {
	my ($self, $n) = @_;
	return "$self->{-inbox}->{mainrepo}/git/$n.git";
}
# Collect the last-indexed commit of every epoch (0..$epoch_max) into an
# arrayref indexed by epoch number.
812 sub last_commits ($$) {
813 my ($self, $epoch_max) = @_;
815 for (my $i = $epoch_max; $i >= 0; $i--) {
816 $heads->[$i] = last_commit_part($self, $i);
# alias the ancestry check implemented in PublicInbox::SearchIdx
821 *is_ancestor = *PublicInbox::SearchIdx::is_ancestor;
# NOTE(review): lines are elided from this listing, including heredoc
# delimiters — the indented warning text below belongs to warn heredocs,
# so no comments are inserted mid-body.
823 # returns a revision range for git-log(1)
824 sub log_range ($$$$$) {
825 my ($self, $sync, $git, $i, $tip) = @_;
826 my $opt = $sync->{-opt};
827 my $pr = $opt->{-progress} if (($opt->{verbose} || 0) > 1);
828 my $cur = $sync->{ranges}->[$i] or do {
829 $pr->("$i.git indexing all of $tip") if $pr;
830 return $tip; # all of it
833 # fast equality check to avoid (v)fork+execve overhead
835 $sync->{ranges}->[$i] = undef;
839 my $range = "$cur..$tip";
840 $pr->("$i.git checking contiguity... ") if $pr;
841 if (is_ancestor($git, $cur, $tip)) { # common case
842 $pr->("OK\n") if $pr;
843 my $n = $git->qx(qw(rev-list --count), $range);
846 $sync->{ranges}->[$i] = undef;
847 $pr->("$i.git has nothing new\n") if $pr;
848 return; # nothing to do
850 $pr->("$i.git has $n changes since $cur\n") if $pr;
852 $pr->("FAIL\n") if $pr;
854 discontiguous range: $range
855 Rewritten history? (in $git->{git_dir})
857 chomp(my $base = $git->qx('merge-base', $tip, $cur));
859 $range = "$base..$tip";
860 warn "found merge-base: $base\n"
863 warn "discarding history at $cur\n";
866 reindexing $git->{git_dir} starting at
869 $sync->{unindex_range}->{$i} = "$base..$cur";
# Compute per-epoch log ranges and count how many messages need
# (re)generation; returns 0 when nothing to do, -1 on reindex, or the
# highest article number to regenerate from.
874 sub sync_prepare ($$$) {
875 my ($self, $sync, $epoch_max) = @_;
876 my $pr = $sync->{-opt}->{-progress};
878 my $head = $self->{-inbox}->{ref_head} || 'refs/heads/master';
880 # reindex stops at the current heads and we later rerun index_sync
# NOTE(review): "my $x = ... if COND" has unspecified behavior per
# perlsyn; consider splitting declaration and conditional assignment.
882 my $reindex_heads = last_commits($self, $epoch_max) if $sync->{reindex};
884 for (my $i = $epoch_max; $i >= 0; $i--) {
885 die 'BUG: already indexing!' if $self->{reindex_pipe};
886 my $git_dir = git_dir_n($self, $i);
887 -d $git_dir or next; # missing parts are fine
888 my $git = PublicInbox::Git->new($git_dir);
889 if ($reindex_heads) {
890 $head = $reindex_heads->[$i] or next;
892 chomp(my $tip = $git->qx(qw(rev-parse -q --verify), $head));
894 next if $?; # new repo
895 my $range = log_range($self, $sync, $git, $i, $tip) or next;
896 $sync->{ranges}->[$i] = $range;
898 # can't use 'rev-list --count' if we use --diff-filter
899 $pr->("$i.git counting $range ... ") if $pr;
901 my $fh = $git->popen(qw(log --pretty=tformat:%H
902 --no-notes --no-color --no-renames
903 --diff-filter=AM), $range, '--', 'm');
905 $pr->("$n\n") if $pr;
909 return 0 if (!$regen_max && !keys(%{$self->{unindex_range}}));
911 # reindex should NOT see new commits anymore, if we do,
912 # it's a problem and we need to notice it via die()
913 my $pad = length($regen_max) + 1;
914 $sync->{-regen_fmt} = "% ${pad}u/$regen_max\n";
916 return -1 if $sync->{reindex};
917 $regen_max + $self->{mm}->num_highwater() || 0;
# Tell every Xapian partition and the overview DB to drop $oid/$mid
920 sub unindex_oid_remote ($$$) {
921 my ($self, $oid, $mid) = @_;
922 $_->remote_remove($oid, $mid) foreach @{$self->{idx_parts}};
923 $self->{over}->remove_oid($oid, $mid);
# Remove a single blob from all indices: find the article numbers whose
# stored blob matches $oid for each of its Message-IDs.
926 sub unindex_oid ($$$) {
927 my ($self, $git, $oid) = @_;
928 my $msgref = $git->cat_file($oid);
929 my $mime = PublicInbox::MIME->new($msgref);
930 my $mids = mids($mime->header_obj);
# free the message body early; only the mids are needed below
931 $mime = $msgref = undef;
932 my $over = $self->{over};
933 foreach my $mid (@$mids) {
936 while (my $smsg = $over->next_by_mid($mid, \$id, \$prev)) {
937 $gone{$smsg->{num}} = 1 if $oid eq $smsg->{blob};
940 my $n = scalar keys %gone;
943 warn "BUG: multiple articles linked to $oid\n",
944 join(',',sort keys %gone), "\n";
# Count each unindexed article and drop its number from msgmap.
# FIX: the removal counter must be keyed by the loop variable $num;
# the original used $_, which at this point still holds unrelated data
# (e.g. the git-log line from the enclosing read loop), corrupting the
# {unindexed} num => count mapping.
foreach my $num (keys %gone) {
	$self->{unindexed}->{$num}++;
	$self->{mm}->num_delete($num);
950 unindex_oid_remote($self, $oid, $mid);
# precompiled pattern for an abbreviation-free SHA-1 object ID
954 my $x40 = qr/[a-f0-9]{40}/;
# unindex fragment (sub header elided): walk git-log --raw output over
# $unindex_range and unindex every added/modified 'm' blob; optionally
# gc afterwards when pruning removed anything.
956 my ($self, $sync, $git, $unindex_range) = @_;
957 my $un = $self->{unindexed} ||= {}; # num => removal count
958 my $before = scalar keys %$un;
959 my @cmd = qw(log --raw -r
960 --no-notes --no-color --no-abbrev --no-renames);
961 my $fh = $self->{reindex_pipe} = $git->popen(@cmd, $unindex_range);
963 /\A:\d{6} 100644 $x40 ($x40) [AM]\tm$/o or next;
964 unindex_oid($self, $git, $1);
966 delete $self->{reindex_pipe};
969 return unless $sync->{-opt}->{prune};
970 my $after = scalar keys %$un;
971 return if $before == $after;
973 # ensure any blob can no longer be accessed via dumb HTTP
974 PublicInbox::Import::run_die(['git', "--git-dir=$git->{git_dir}",
975 qw(-c gc.reflogExpire=now gc --prune=all)]);
# Determine starting ranges for index_sync: the stored last-commits for
# an incremental run, an empty list for a full reindex, or the
# user-supplied {from} array for a partial reindex.
978 sub sync_ranges ($$$) {
979 my ($self, $sync, $epoch_max) = @_;
980 my $reindex = $sync->{reindex};
982 return last_commits($self, $epoch_max) unless $reindex;
983 return [] if ref($reindex) ne 'HASH';
985 my $ranges = $reindex->{from}; # arrayref;
986 if (ref($ranges) ne 'ARRAY') {
987 die 'BUG: $reindex->{from} not an ARRAY';
# Index one git epoch: run any pending unindex range first, then walk
# git-log --raw over the epoch's range, reindexing modified/added 'm'
# blobs and marking 'd' blobs as deletes; record the tip commit last.
992 sub index_epoch ($$$) {
993 my ($self, $sync, $i) = @_;
995 my $git_dir = git_dir_n($self, $i);
996 die 'BUG: already reindexing!' if $self->{reindex_pipe};
997 -d $git_dir or return; # missing parts are fine
998 fill_alternates($self, $i);
999 my $git = PublicInbox::Git->new($git_dir);
1000 if (my $unindex_range = delete $sync->{unindex_range}->{$i}) {
1001 unindex($self, $sync, $git, $unindex_range);
1003 defined(my $range = $sync->{ranges}->[$i]) or return;
1004 if (my $pr = $sync->{-opt}->{-progress}) {
1005 $pr->("$i.git indexing $range\n");
1008 my @cmd = qw(log --raw -r --pretty=tformat:%H
1009 --no-notes --no-color --no-abbrev --no-renames);
1010 my $fh = $self->{reindex_pipe} = $git->popen(@cmd, $range);
# expose progress to watchers (e.g. for error reporting)
1014 $self->{current_info} = "$i.git $_";
1015 if (/\A$x40$/o && !defined($cmt)) {
1017 } elsif (/\A:\d{6} 100644 $x40 ($x40) [AM]\tm$/o) {
1018 reindex_oid($self, $sync, $git, $1);
1019 } elsif (/\A:\d{6} 100644 $x40 ($x40) [AM]\td$/o) {
1020 mark_deleted($self, $sync, $git, $1);
1024 delete $self->{reindex_pipe};
1025 update_last_commit($self, $git, $i, $cmt) if defined $cmt;
1028 # public, called by public-inbox-index
# Synchronize all indices with git history: prepare sync state, walk
# epochs newest-first, flush leftover deletes, then (on reindex) rerun
# once without the reindex flag to pick up concurrent writes.
1030 my ($self, $opt) = @_;
1032 my $pr = $opt->{-progress};
1034 my $latest = git_dir_latest($self, \$epoch_max);
1035 return unless defined $latest;
1036 $self->idx_init($opt); # acquire lock
1038 D => {}, # "$mid\0$cid" => $oid
1039 unindex_range => {}, # EPOCH => oid_old..oid_new
1040 reindex => $opt->{reindex},
1043 $sync->{ranges} = sync_ranges($self, $sync, $epoch_max);
1044 $sync->{regen} = sync_prepare($self, $sync, $epoch_max);
1046 if ($sync->{regen}) {
1047 # tmp_clone seems to fail if inside a transaction, so
1048 # we rollback here (because we opened {mm} for reading)
1049 # Note: we do NOT rely on DBI transactions for atomicity;
1050 # only for batch performance.
1051 $self->{mm}->{dbh}->rollback;
1052 $self->{mm}->{dbh}->begin_work;
1053 $sync->{mm_tmp} = $self->{mm}->tmp_clone;
1056 # work backwards through history
1057 for (my $i = $epoch_max; $i >= 0; $i--) {
1058 index_epoch($self, $sync, $i);
1061 # unindex is required for leftovers if "deletes" affect messages
1062 # in a previous fetch+index window:
1063 if (my @leftovers = values %{delete $sync->{D}}) {
1064 my $git = $self->{-inbox}->git;
1065 unindex_oid($self, $git, $_) for @leftovers;
1070 if (my $nr = $sync->{nr}) {
1071 my $pr = $sync->{-opt}->{-progress};
1072 $pr->('all.git '.sprintf($sync->{-regen_fmt}, $nr)) if $pr;
1075 # reindex does not pick up new changes, so we rerun w/o it:
1076 if ($opt->{reindex}) {
1078 my %again = %$opt;
1079 delete @again{qw(reindex -skip_lock)};
1080 index_sync($self, \%again);