We cannot let a client monopolize the single-threaded server
even if it can drain the socket buffer faster than we can
emit data.
While we're at it, acknowledge this behavior (which happens
naturally) in httpd/async.
The same idea is present in NNTP for the long_response code.
This is the HTTP followup to:
commit
0d0fde0bff97 ("nntp: introduce long response API for streaming")
commit
79d8bfedcdd2 ("nntp: avoid signals for long responses")
my $pull = $self->{pull} = sub {
	local $/ = \8192;
	my $forward = $self->{forward};
+	# limit our own running time for fairness with other
+	# clients and to avoid buffering too much:
+	my $n = 100;
	while ($forward && defined(my $buf = $forward->getline)) {
		$write->($buf);
		last if $self->{closed};
-		if ($self->{write_buf_size}) {
+		if ((--$n) <= 0 || $self->{write_buf_size}) {
			$self->write($self->{pull});
			return;
		}
	}
	$self->watch_read(0);
	$io->write($restart_read); # D::S::write
}
-	return; # stay in watch_read
+	# stay in watch_read, but let other clients
+	# get some work done, too.
+	return;
} elsif (!defined $r) {
	return if $!{EAGAIN} || $!{EINTR};
}