use warnings;
use base qw(PublicInbox::Lock);
use PublicInbox::SearchIdxPart;
-use PublicInbox::SearchIdxSkeleton;
use PublicInbox::MIME;
use PublicInbox::Git;
use PublicInbox::Import;
use PublicInbox::MID qw(mids);
use PublicInbox::ContentId qw(content_id content_digest);
use PublicInbox::Inbox;
+use PublicInbox::OverIdxFork;
+use PublicInbox::Msgmap;
+use IO::Handle;
# an estimate of the post-packed size to the raw uncompressed size
my $PACKING_FACTOR = 0.4;
partitions => $nparts,
parallel => 1,
transact_bytes => 0,
+ over => PublicInbox::OverIdxFork->new("$xpfx/over.sqlite3"),
lock_path => "$dir/inbox.lock",
# limit each repo to 1GB or so
rotate_bytes => int((1024 * 1024 * 1024) / $PACKING_FACTOR),
my $mids = mids($mime->header_obj);
if (@$mids) {
my $mid = $mids->[0];
- my $num = $self->{skel}->{mm}->mid_insert($mid);
+ my $num = $self->{mm}->mid_insert($mid);
if (defined $num) { # common case
$$mid0 = $mid;
return $num;
# try the rest of the mids
for(my $i = $#$mids; $i >= 1; $i--) {
my $m = $mids->[$i];
- $num = $self->{skel}->{mm}->mid_insert($m);
+ $num = $self->{mm}->mid_insert($m);
if (defined $num) {
warn "alternative <$m> for <$mid> found\n";
$$mid0 = $m;
my $hdr = $mime->header_obj;
my $dig = content_digest($mime);
$$mid0 = PublicInbox::Import::digest2mid($dig);
- my $num = $self->{skel}->{mm}->mid_insert($$mid0);
+ my $num = $self->{mm}->mid_insert($$mid0);
unless (defined $num) {
# it's hard to spoof the last Received: header
my @recvd = $hdr->header_raw('Received');
$dig->add("Received: $_") foreach (@recvd);
$$mid0 = PublicInbox::Import::digest2mid($dig);
- $num = $self->{skel}->{mm}->mid_insert($$mid0);
+ $num = $self->{mm}->mid_insert($$mid0);
# fall back to a random Message-ID and give up determinism:
until (defined($num)) {
$dig->add(rand);
$$mid0 = PublicInbox::Import::digest2mid($dig);
warn "using random Message-ID <$$mid0> as fallback\n";
- $num = $self->{skel}->{mm}->mid_insert($$mid0);
+ $num = $self->{mm}->mid_insert($$mid0);
}
}
PublicInbox::Import::append_mid($hdr, $$mid0);
# frequently activated.
delete $ibx->{$_} foreach (qw(git mm search));
+ my $over = $self->{over};
$ibx->umask_prepare;
$ibx->with_umask(sub {
$self->lock_acquire;
-
- # first time initialization, first we create the skeleton pipe:
- my $skel = PublicInbox::SearchIdxSkeleton->new($self);
- $self->{skel} = $skel;
+ $over->create($self);
# need to create all parts before initializing msgmap FD
my $max = $self->{partitions} - 1;
# idx_parts must be visible to all forked processes
my $idx = $self->{idx_parts} = [];
for my $i (0..$max) {
- push @$idx,
- PublicInbox::SearchIdxPart->new($self, $i, $skel);
+ push @$idx, PublicInbox::SearchIdxPart->new($self, $i);
}
- # Now that all subprocesses are up, we can open the FD for SQLite:
- $skel->_msgmap_init->{dbh}->begin_work;
+ # Now that all subprocesses are up, we can open the FDs
+ # for SQLite:
+ my $mm = $self->{mm} = PublicInbox::Msgmap->new_file(
+ "$self->{-inbox}->{mainrepo}/msgmap.sqlite3", 1);
+ $mm->{dbh}->begin_work;
});
}
my $ibx = $self->{-inbox};
my $srch = $ibx->search;
my $cid = content_id($mime);
- my $skel = $self->{skel};
my $parts = $self->{idx_parts};
- my $mm = $skel->{mm};
+ my $mm = $self->{mm};
my $removed;
my $mids = mids($mime->header_obj);
$orig = undef;
$removed->num; # memoize this for callers
- foreach my $idx (@$parts, $skel) {
+ foreach my $idx (@$parts) {
$idx->remote_remove($oid, $mid);
}
+ $self->{over}->remove_oid($oid, $mid);
}
1; # continue
});
# Finalize an import session.  Teardown order matters and mirrors data
# importance (git > SQLite > Xapian): flush git fast-import first, commit
# the msgmap SQLite transaction, then commit+close every Xapian partition
# before the shared overview ({over}) process, since partitions write to
# {over}.  Releases the inbox lock last.
sub done {
my ($self) = @_;
- my $locked = defined $self->{idx_parts};
my $im = delete $self->{im};
$im->done if $im; # PublicInbox::Import::done
- $self->searchidx_checkpoint(0);
- $self->lock_release if $locked;
+
+ if (my $mm = delete $self->{mm}) {
+ $mm->{dbh}->commit;
+ }
+
+ # order matters, we can only close {over} after all partitions
+ # are done because the partitions also write to {over}
+ my $parts = delete $self->{idx_parts};
+ if ($parts) {
+ $_->remote_commit for @$parts;
+ $_->remote_close for @$parts;
+ }
+
+ my $over = $self->{over};
+ $over->remote_commit;
+ $over->remote_close;
+ $self->{transact_bytes} = 0;
# $parts was truthy iff we held the lock (idx_parts only exists locked)
+ $self->lock_release if $parts;
}
# Durable checkpoint: flush git fast-import state, then issue a write
# barrier with fsync requested (barrier(1)) so committed data survives
# a crash.  Unlike done(), this keeps all writer processes running.
sub checkpoint {
my ($self) = @_;
my $im = $self->{im};
$im->checkpoint if $im; # PublicInbox::Import::checkpoint
- $self->searchidx_checkpoint(1);
+ $self->barrier(1);
}
# issue a write barrier to ensure all data is visible to other processes
# and read-only ops. Order of data importance is: git > SQLite > Xapian
# $fsync: when truthy, ask the overview process to fsync after the
# barrier completes (used by checkpoint() for durable checkpoints).
sub barrier {
- my ($self) = @_;
+ my ($self, $fsync) = @_;
if (my $im = $self->{im}) {
$im->barrier;
}
- my $skel = $self->{skel};
my $parts = $self->{idx_parts};
- if ($parts && $skel) {
- my $dbh = $skel->{mm}->{dbh};
- $dbh->commit; # SQLite data is second in importance
+ if ($parts) {
+ my $dbh = $self->{mm}->{dbh};
+ $dbh->commit; # SQLite msgmap data is second in importance
- # Now deal with Xapian
- $skel->barrier_init(scalar(@$parts));
- # each partition needs to issue a barrier command to skel:
- $_->remote_barrier foreach @$parts;
+ my $over = $self->{over};
- $skel->barrier_wait; # wait for each Xapian partition
+ # Now deal with Xapian and overview DB
+ $over->barrier_init(scalar(@$parts));
- $dbh->begin_work;
- }
- $self->{transact_bytes} = 0;
-}
-
-sub searchidx_checkpoint {
- my ($self, $more) = @_;
+ # each partition needs to issue a barrier command to over
+ $_->remote_barrier foreach @$parts;
- # order matters, we can only close {skel} after all partitions
- # are done because the partitions also write to {skel}
- if (my $parts = $self->{idx_parts}) {
- foreach my $idx (@$parts) {
- $idx->remote_commit; # propagates commit to skel
- $idx->remote_close unless $more;
- }
- delete $self->{idx_parts} unless $more;
- }
+ $over->barrier_wait; # wait for each Xapian partition
+ $over->commit_fsync if $fsync;
# reopen a msgmap transaction so subsequent writes stay batched
+ $dbh->begin_work;
}
$self->{transact_bytes} = 0;
}
} else {
$self->{im} = undef;
$im->done;
- $self->searchidx_checkpoint(1);
+ $self->barrier(1);
$im = undef;
my $git_dir = $self->git_init(++$self->{max_git});
my $git = PublicInbox::Git->new($git_dir);
if (my $im = $self->{im}) {
$im->atfork_child;
}
+ die "unexpected mm" if $self->{mm};
}
sub mark_deleted {
if (!defined($mid0) && $regen && !$del) {
$num = $$regen--;
die "BUG: ran out of article numbers\n" if $num <= 0;
- my $mm = $self->{skel}->{mm};
+ my $mm = $self->{mm};
foreach my $mid (reverse @$mids) {
if ($mm->mid_set($num, $mid) == 1) {
$mid0 = $mid;
my $head = $ibx->{ref_head} || 'refs/heads/master';
$self->idx_init; # acquire lock
my $x40 = qr/[a-f0-9]{40}/;
- my $mm_tmp = $self->{skel}->{mm}->tmp_clone;
+ my $mm_tmp = $self->{mm}->tmp_clone;
if (!$regen) {
my (undef, $max) = $mm_tmp->minmax;
unless (defined $max) {