I've hit /proc/sys/fs/pipe-user-pages-* limits on some systems.
So stop hogging resources on pipes which don't benefit from
giant sizes.
Some of these can use eventfd in the future to further reduce
resource use.
sub master_loop {
pipe(my ($p0, $p1)) or die "failed to create parent-pipe: $!";
pipe(my ($r, $w)) or die "failed to create self-pipe: $!";
+
+ if ($^O eq 'linux') { # 1031: F_SETPIPE_SZ = 1031
+ fcntl($_, 1031, 4096) for ($w, $p1);
+ }
+
IO::Handle::blocking($w, 0);
my $set_workers = $worker_processes;
my @caught;
sub once_init () {
my $self = fields::new('PublicInbox::EvCleanup');
my ($r, $w);
+
+ # This is a dummy pipe which is always writable so it can always
+ # fire in the next event loop iteration.
pipe($r, $w) or die "pipe: $!";
+ fcntl($w, 1031, 4096) if $^O eq 'linux'; # 1031: F_SETPIPE_SZ
$self->SUPER::new($w);
$self->{rd} = $r; # never read, since we never write..
$self;
pipe($in_r, $in_w) or fail($self, "pipe failed: $!");
pipe($out_r, $out_w) or fail($self, "pipe failed: $!");
+ if ($^O eq 'linux') { # 1031: F_SETPIPE_SZ
+ fcntl($out_w, 1031, 4096);
+ fcntl($in_w, 1031, 4096) if $batch eq '--batch-check';
+ }
my @cmd = ('git', "--git-dir=$self->{git_dir}", qw(cat-file), $batch);
my $redir = { 0 => fileno($out_r), 1 => fileno($in_w) };
if ($self->{parallel}) {
pipe(my ($r, $w)) or die "pipe failed: $!";
+ # pipe for barrier notifications doesn't need to be big,
+ # 1031: F_SETPIPE_SZ
+ fcntl($w, 1031, 4096) if $^O eq 'linux';
$self->{bnote} = [ $r, $w ];
$w->autoflush(1);
}