my $PACKING_FACTOR = 0.4;

# Number of index partitions to create by default.
# assume 2 cores if GNU nproc(1) is not available
sub nproc () {
	my $n = $ENV{NPROC} || `nproc 2>/dev/null` || 2;
	return int($n);
}
# Constructor: prepares (and optionally creates) the on-disk inbox
# layout and counts existing Xapian partitions so an existing inbox
# keeps its partition count even if the core count changes.
# Dies if the mainrepo directory is missing and $creat is false.
sub new {
	my ($class, $v2ibx, $creat) = @_;
	my $dir = $v2ibx->{mainrepo} or die "no mainrepo in inbox\n";
	my $lock_path = "$dir/inbox.lock";
	unless (-d $dir) {
		if ($creat) {
			require File::Path;
			File::Path::mkpath($dir);
			# pre-create the lock file so later flock attempts
			# never fail on a missing file:
			open my $fh, '>>', $lock_path or
				die "failed to open $lock_path: $!\n";
		} else {
			die "$dir does not exist\n";
		}
	}

	my $nparts = 0;
	my $xpfx = "$dir/xap" . PublicInbox::Search::SCHEMA_VERSION;

	# always load existing partitions in case core count changes:
	if (-d $xpfx) {
		foreach my $part (<$xpfx/*>) {
			# only numeric subdirectories are partitions
			-d $part && $part =~ m!/\d+\z! or next;
			eval {
				# count only directories Xapian can open:
				Search::Xapian::Database->new($part)->close;
				$nparts++;
			};
		}
	}
	$nparts = nproc() if ($nparts == 0);

	my $self = {
		-inbox => $v2ibx,
		im => undef, # PublicInbox::Import
		xap_rw => undef, # PublicInbox::V2SearchIdx
		xap_ro => undef,
		partitions => $nparts,
		transact_bytes => 0,
		# reuse $lock_path instead of rebuilding the string:
		lock_path => $lock_path,
		# limit each repo to 1GB or so
		rotate_bytes => int((1024 * 1024 * 1024) / $PACKING_FACTOR),
	};
	bless $self, $class;
}
# NOTE(review): interior fragment of a larger sub -- its "sub" line and
# some interior lines are not visible in this view; do not assume
# context beyond what is shown.
# crap, Message-ID is already known, hope somebody just resent:
-	$self->done; # write barrier, clears $self->{skel}
+	$self->barrier;
	foreach my $m (@$mids) {
		# read-only lookup now safe to do after above barrier
		my $existing = $self->lookup_content($mime, $m);
		# very unlikely:
		warn "<$mid> reused for mismatched content\n";
-	$self->idx_init;
	# try the rest of the mids
	foreach my $i (1..$#$mids) {
		my $hdr = $mime->header_obj;
		my $dig = content_digest($mime);
		# digest2mid centralizes the Message-ID formatting that was
		# previously inlined as hexdigest . '@localhost' (three call
		# sites below):
-		$$mid0 = $dig->clone->hexdigest . '@localhost';
+		$$mid0 = PublicInbox::Import::digest2mid($dig);
		my $num = $self->{skel}->{mm}->mid_insert($$mid0);
		unless (defined $num) {
			# it's hard to spoof the last Received: header
			my @recvd = $hdr->header_raw('Received');
			$dig->add("Received: $_") foreach (@recvd);
-			$$mid0 = $dig->clone->hexdigest . '@localhost';
+			$$mid0 = PublicInbox::Import::digest2mid($dig);
			$num = $self->{skel}->{mm}->mid_insert($$mid0);
			# fall back to a random Message-ID and give up determinism:
			until (defined($num)) {
				$dig->add(rand);
-				$$mid0 = $dig->clone->hexdigest . '@localhost';
+				$$mid0 = PublicInbox::Import::digest2mid($dig);
				warn "using random Message-ID <$$mid0> as fallback\n";
				$num = $self->{skel}->{mm}->mid_insert($$mid0);
			}
		}
# Remove the message whose content matches $mime from the inbox:
# deletes its number mapping, tells the git importer to drop it, and
# issues remote_remove to every index partition plus the skeleton.
# Returns the removed smsg (with ->{mime} attached) or undef when no
# message with matching content-id was found.
sub remove {
	my ($self, $mime, $cmt_msg) = @_;
	$self->barrier;
	$self->idx_init;
	my $im = $self->importer;
	my $ibx = $self->{-inbox};
	my $srch = $ibx->search;
	my $want_cid = content_id($mime);
	my $skel = $self->{skel};
	my $parts = $self->{idx_parts};
	my $mm = $skel->{mm};
	my $removed;

	for my $mid (@{ mids($mime->header_obj) }) {
		$srch->reopen->each_smsg_by_mid($mid, sub {
			my ($smsg) = @_;
			$smsg->load_expand;
			my $msg = $ibx->msg_by_smsg($smsg);
			unless (defined $msg) {
				warn "broken smsg for $mid\n";
				return 1; # continue
			}
			my $raw = $$msg;
			my $cur = PublicInbox::MIME->new($msg);
			# only remove messages whose content actually matches:
			return 1 unless content_id($cur) eq $want_cid;

			$mm->num_delete($smsg->num);
			# $removed should only be set once assuming
			# no bugs in our deduplication code:
			$removed = $smsg;
			$removed->{mime} = $cur;
			$im->remove(\$raw, $cmt_msg);
			$raw = undef;
			$removed->num; # memoize this for callers

			my $oid = $smsg->{blob};
			$_->remote_remove($oid, $mid) for (@$parts, $skel);
			1; # continue
		});
		$self->barrier;
	}
	$removed;
}
# finalize all pending writes (terminal checkpoint)
sub done {
	# NOTE(review): `my ($self) = @_;` is not visible in this fragment;
	# presumably elided lines unpack $self first -- verify in full file.
	$self->searchidx_checkpoint(1);
}
# issue a write barrier to ensure all data is visible to other processes
# and read-only ops. Order of data importance is: git > SQLite > Xapian
sub barrier {
	my ($self) = @_;

	# git data first, it is the most important:
	my $im = $self->{im};
	$im->barrier if $im;

	my $skel = $self->{skel};
	my $parts = $self->{idx_parts};
	if ($parts && $skel) {
		my $dbh = $skel->{mm}->{dbh};
		$dbh->commit; # SQLite data is second in importance

		# Xapian last: every partition issues a barrier command to
		# skel, then we wait until skel has seen all of them:
		$skel->barrier_init(scalar @$parts);
		$_->barrier for @$parts;
		$skel->barrier_wait; # wait for each Xapian partition

		# reopen the SQLite transaction for subsequent writes:
		$dbh->begin_work;
	}
	$self->{transact_bytes} = 0;
}
+
sub searchidx_checkpoint {
	my ($self, $more) = @_;
	# NOTE(review): the lines below appear to belong to a different sub
	# (an importer constructor): $git and $packed_bytes are not declared
	# in this fragment -- verify against the complete file.
	my $im = PublicInbox::Import->new($git, undef, undef, $self->{-inbox});
	$im->{bytes_added} = int($packed_bytes / $PACKING_FACTOR);
	$im->{want_object_info} = 1;
	# v2 locking switched from the ssoma_lock flag to an explicit
	# lock_path shared with the writer:
-	$im->{ssoma_lock} = 0;
+	$im->{lock_path} = $self->{lock_path};
	$im->{path_type} = 'v2';
	$self->{im} = $im;
}
	# NOTE(review): fragment of a content-lookup sub -- its "sub" line
	# and the rest of the each_smsg_by_mid callback are outside this view.
	my ($self, $mime, $mid) = @_;
	my $ibx = $self->{-inbox};
	# reopen so the read-only handle sees data committed by the most
	# recent write barrier -- TODO confirm against Xapian reopen docs
-	my $srch = $ibx->search;
+	my $srch = $ibx->search->reopen;
	my $cid = content_id($mime);
	my $found;
	$srch->each_smsg_by_mid($mid, sub {