package PublicInbox::SearchIdx;
use strict;
use warnings;
-use Fcntl qw(:flock :DEFAULT);
+use base qw(PublicInbox::Search PublicInbox::Lock);
use PublicInbox::MIME;
-use base qw(PublicInbox::Search);
use PublicInbox::MID qw/mid_clean id_compress mid_mime mids references/;
use PublicInbox::MsgIter;
use Carp qw(croak);
my ($self) = @_;
my $xdb = delete $self->{xdb} or croak 'not acquired';
$xdb->close;
- _lock_release($self) if $self->{creat};
+ $self->lock_release if $self->{creat};
undef;
}
my $flag = Search::Xapian::DB_OPEN;
if ($self->{creat}) {
require File::Path;
- _lock_acquire($self);
+ $self->lock_acquire;
File::Path::mkpath($dir);
$flag = Search::Xapian::DB_CREATE_OR_OPEN;
}
$self->{xdb} = Search::Xapian::WritableDatabase->new($dir, $flag);
}
-# we only acquire the flock if creating or reindexing;
-# PublicInbox::Import already has the lock on its own.
-sub _lock_acquire {
- my ($self) = @_;
- croak 'already locked' if $self->{lockfh};
- my $lock_path = $self->{lock_path} or return;
- sysopen(my $lockfh, $lock_path, O_WRONLY|O_CREAT) or
- die "failed to open lock $lock_path: $!\n";
- flock($lockfh, LOCK_EX) or die "lock failed: $!\n";
- $self->{lockfh} = $lockfh;
-}
-
-sub _lock_release {
- my ($self) = @_;
- return unless $self->{lock_path};
- my $lockfh = delete $self->{lockfh} or croak 'not locked';
- flock($lockfh, LOCK_UN) or die "unlock failed: $!\n";
- close $lockfh or die "close failed: $!\n";
-}
-
sub add_val ($$$) {
my ($doc, $col, $num) = @_;
$num = Search::Xapian::sortable_serialise($num);
my $lines = $values->[PublicInbox::Search::LINES];
add_val($doc, PublicInbox::Search::LINES, $lines);
- my $yyyymmdd = strftime('%Y%m%d', gmtime($ts));
+ my $ds = $values->[PublicInbox::Search::DS];
+ add_val($doc, PublicInbox::Search::DS, $ds);
+ my $yyyymmdd = strftime('%Y%m%d', gmtime($ds));
add_val($doc, PublicInbox::Search::YYYYMMDD, $yyyymmdd);
}
}
my $lines = $mime->body_raw =~ tr!\n!\n!;
- my @values = ($smsg->ts, $num, $bytes, $lines);
+ my @values = ($smsg->ds, $num, $bytes, $lines, $smsg->ts);
add_values($doc, \@values);
my $tg = $self->term_generator;
# populates smsg->references for smsg->to_doc_data
my $refs = parse_references($smsg);
- $mid0 = $mids->[0] unless defined $mid0;
+ $mid0 = $mids->[0] unless defined $mid0; # v1 compatibility
my $data = $smsg->to_doc_data($oid, $mid0);
foreach my $mid (@$mids) {
$tg->index_text($mid, 1, 'XM');
}
}
+ $self->delete_article($num) if defined $num; # for reindexing
if ($skel) {
push @values, $mids, $xpath, $data;
$skel->index_skeleton(\@values);
$doc->add_boolean_term('Q' . $_) foreach @$mids;
+ $doc->add_boolean_term('XNUM' . $num) if defined $num;
$doc_id = $self->{xdb}->add_document($doc);
} else {
$doc_id = link_and_save($self, $doc, $mids, $refs,
$doc_id;
}
-# returns deleted doc_id on success, undef on missing
+# returns begin and end PostingIterator
+# (a half-open iterator pair over every document carrying $termval;
+# begin == end when no document matches)
+sub find_doc_ids {
+ my ($self, $termval) = @_;
+ my $db = $self->{xdb};
+
+ ($db->postlist_begin($termval), $db->postlist_end($termval));
+}
+
+# invoke $cb with arrayrefs of up to 1000 docids matching $termval,
+# re-querying until no matches remain.  NOTE: $cb is expected to
+# delete (or re-term) the documents it is given so the postlist
+# shrinks; otherwise this loop never terminates, since each pass
+# calls find_doc_ids on the same term again.
+sub batch_do {
+ my ($self, $termval, $cb) = @_;
+ my $batch_size = 1000; # don't let @ids grow too large to avoid OOM
+ while (1) {
+ my ($head, $tail) = $self->find_doc_ids($termval);
+ return if $head == $tail;
+ my @ids;
+ for (; $head != $tail && @ids < $batch_size; $head->inc) {
+ push @ids, $head->get_docid;
+ }
+ $cb->(\@ids);
+ }
+}
+
+# delete all documents indexed under Message-ID $mid (boolean term
+# "Q<mid>").  Warns rather than dying on Xapian errors or when no
+# matching document exists.  No meaningful return value (the old
+# deleted-docid return was dropped by this change).
sub remove_message {
my ($self, $mid) = @_;
my $db = $self->{xdb};
- my $doc_id;
+ my $called;
$mid = mid_clean($mid);
eval {
- my ($head, $tail) = $self->find_doc_ids('Q' . $mid);
- if ($head->equal($tail)) {
- warn "cannot remove non-existent <$mid>\n";
- }
- for (; $head != $tail; $head->inc) {
- my $docid = $head->get_docid;
- $db->delete_document($docid);
- }
+ batch_do($self, 'Q' . $mid, sub {
+ my ($ids) = @_;
+ $db->delete_document($_) for @$ids;
+ # $called distinguishes "no match" from "deleted something"
+ $called = 1;
+ });
};
-
if ($@) {
warn "failed to remove message <$mid>: $@\n";
- return undef;
+ } elsif (!$called) {
+ warn "cannot remove non-existent <$mid>\n";
}
- $doc_id;
+}
+
+# delete every document indexed for article number $num (boolean
+# term "XNUM<num>"); called from add_message when reindexing so a
+# replacement document does not duplicate the old one
+sub delete_article {
+ my ($self, $num) = @_;
+ my $ndel = 0;
+ # NOTE(review): $ndel is accumulated but the sub never returns it
+ # explicitly (batch_do's return value is what falls out) — confirm intent
+ batch_do($self, 'XNUM' . $num, sub {
+ my ($ids) = @_;
+ $ndel += scalar @$ids;
+ $self->{xdb}->delete_document($_) for @$ids;
+ });
+}
+
+# MID is a hint in V2
+# delete only the document(s) under "Q<mid>" whose indexed blob
+# matches $oid; returns the number of documents deleted
+sub remove_by_oid {
+ my ($self, $oid, $mid) = @_;
+ my $db = $self->{xdb};
+
+ # XXX careful, we cannot use batch_do here since we conditionally
+ # delete documents based on other factors, so we cannot call
+ # find_doc_ids twice.
+ my ($head, $tail) = $self->find_doc_ids('Q' . $mid);
+ return if $head == $tail;
+
+ # there is only ONE element in @delete unless we
+ # have bugs in our v2writable deduplication check
+ my @delete;
+ for (; $head != $tail; $head->inc) {
+ my $docid = $head->get_docid;
+ my $doc = $db->get_document($docid);
+ my $smsg = PublicInbox::SearchMsg->wrap($doc, $mid);
+ $smsg->load_expand;
+ # only delete when the stored blob OID matches the request
+ push(@delete, $docid) if $smsg->{blob} eq $oid;
+ }
+ $db->delete_document($_) foreach @delete;
+ scalar(@delete);
}
sub term_generator { # write-only
$doc->add_boolean_term('Q' . $_) foreach @$mids;
my $vivified = 0;
+ $self->{skel} and die "Should not have read-only skel here\n";
foreach my $mid (@$mids) {
$self->each_smsg_by_mid($mid, sub {
my ($cur) = @_;
1;
});
}
+ if ($vivified > 1) {
+ my $id = '<'.join('> <', @$mids).'>';
+ warn "BUG: vivified multiple ($vivified) ghosts for $id\n";
+ }
# not really important, but we return any vivified ghost docid, here:
return $doc_id if defined $doc_id;
link_doc($self, $doc, $refs, $old_tid);
my ($self, $winner_tid, $loser_tid) = @_;
return if $winner_tid == $loser_tid;
my $db = $self->{xdb};
-
- my $batch_size = 1000; # don't let @ids grow too large to avoid OOM
- while (1) {
- my ($head, $tail) = $self->find_doc_ids('G' . $loser_tid);
- return if $head == $tail;
- my @ids;
- for (; $head != $tail && @ids < $batch_size; $head->inc) {
- push @ids, $head->get_docid;
- }
- foreach my $docid (@ids) {
+ batch_do($self, 'G' . $loser_tid, sub {
+ my ($ids) = @_;
+ foreach my $docid (@$ids) {
my $doc = $db->get_document($docid);
$doc->remove_term('G' . $loser_tid);
$doc->add_boolean_term('G' . $winner_tid);
$db->replace_document($docid, $doc);
}
- }
+ });
}
sub _read_git_config_perm {
# remote_* subs are only used by SearchIdxPart and SearchIdxSkeleton
+# flush pending index changes: when {w} (the pipe to a partition
+# process) is set, ask the remote worker to commit; otherwise commit
+# our own lazy transaction locally, plus that of the skeleton index
+# if one is attached
sub remote_commit {
my ($self) = @_;
- print { $self->{w} } "commit\n" or die "failed to write commit: $!";
+ if (my $w = $self->{w}) {
+ print $w "commit\n" or die "failed to write commit: $!";
+ } else {
+ $self->commit_txn_lazy;
+ if (my $skel = $self->{skeleton}) {
+ $skel->commit_txn_lazy;
+ }
+ }
}
+# shut down the indexer: with a pipe ({w}) we are the parent of a
+# remote partition process, so send "close" and reap it, dying if it
+# exited non-zero; without a pipe we operate locally and just release
+# the Xapian handle, refusing while a transaction is still open
sub remote_close {
my ($self) = @_;
- my $pid = delete $self->{pid} or die "no process to wait on\n";
- my $w = delete $self->{w} or die "no pipe to write to\n";
- print $w "close\n" or die "failed to write to pid:$pid: $!\n";
- close $w or die "failed to close pipe for pid:$pid: $!\n";
- waitpid($pid, 0) == $pid or die "remote process did not finish";
- $? == 0 or die ref($self)." pid:$pid exited with: $?";
+ if (my $w = delete $self->{w}) {
+ my $pid = delete $self->{pid} or die "no process to wait on\n";
+ print $w "close\n" or die "failed to write to pid:$pid: $!\n";
+ close $w or die "failed to close pipe for pid:$pid: $!\n";
+ waitpid($pid, 0) == $pid or die "remote process did not finish";
+ $? == 0 or die ref($self)." pid:$pid exited with: $?";
+ } else {
+ die "transaction in progress $self\n" if $self->{txn};
+ $self->_xdb_release if $self->{xdb};
+ }
+}
+
+# remove a message by git blob OID ($mid is only a hint in v2, see
+# remove_by_oid): with a pipe, send a "D" command to the partition or
+# skeleton worker; otherwise remove locally inside a lazy transaction
+sub remote_remove {
+ my ($self, $oid, $mid) = @_;
+ if (my $w = $self->{w}) {
+ # triggers remove_by_oid in partition or skeleton
+ print $w "D $oid $mid\n" or die "failed to write remove $!";
+ } else {
+ $self->begin_txn_lazy;
+ $self->remove_by_oid($oid, $mid);
+ }
+}
+
+# begin a Xapian transaction unless one is already active ({txn}
+# flag); acquires the writable DB handle on demand.  Idempotent.
+sub begin_txn_lazy {
+ my ($self) = @_;
+ return if $self->{txn};
+ my $xdb = $self->{xdb} || $self->_xdb_acquire;
+ $xdb->begin_transaction;
+ $self->{txn} = 1;
+}
+
+# commit the active transaction, if any; no-op when {txn} is unset,
+# so it is always safe to call
+sub commit_txn_lazy {
+ my ($self) = @_;
+ delete $self->{txn} or return;
+ $self->{xdb}->commit_transaction;
+}
+
+# sanity checks at worker shutdown: the Xapian handle must have been
+# released and no transaction may still be pending; dies otherwise
+sub worker_done {
+ my ($self) = @_;
+ die "$$ $0 xdb not released\n" if $self->{xdb};
+ die "$$ $0 still in transaction\n" if $self->{txn};
}
1;