${=PARALLEL} --jobs ${(P)2} --joblog $log $3 ::: feeds/* || rc=$?
fpath=($cmds/functions.zsh $fpath)
autoload print-joblog-failed
-print-joblog-failed < $log
+print-joblog-failed <$log
exit ${rc:-0}
cmds="$(dirname "$(realpath -- "$0")")"
. "$cmds/env.rc"
cd "$1"
-read url < url
+read url <url
[ -s etag ] && etag_compare="--etag-compare etag" || :
[ -r out ] && time_cond="--time-cond out" || :
[ -z "$FEEDER_CURL_VERBOSE" ] && silent="--silent" || silent="--verbose"
$silent \
"$url" >&2
if [ -s out ] ; then
- $ZSTD < out > feed.zst
- touch -r out feed.zst
+ cp out feed
+ touch -r out feed
truncate -s 0 out
- touch -r feed.zst out
+ touch -r feed out
fi
-$SHA512 < feed.zst > download.hash
+$SHA512 <feed >download.hash
${=WGET} --user-agent=$FEEDER_USER_AGENT \
--output-document=$dst/$fn $url 2>encs.log
print $dst/$fn
- done < $new
+ done <$new
}
_feeder_warc_compress() {
$HOME/work/tofuproxy/warc-extract.cmd -for-enzstd "$1" |
- $HOME/work/tofuproxy/cmd/zstd/enzstd > "$1".zst
+ $HOME/work/tofuproxy/cmd/zstd/enzstd >"$1".zst
rm "$1"
}
echo unreadable $f/title >&2
continue
}
- read title < $f/title
+ read title <$f/title
[ -n "$title" ] && label="-label \"$title\"" || :
echo mailboxes $label $f
echo "folder-hook $f \"macro index r '<shell-escape>$cmds/dnp $f\n<change-folder>=$f\n'\""
[ -s max ] && max=`cat max` || max=$FEEDER_MAX_ITEMS
$ZSTD -d < feed.zst | $cmds/feed2mdir/feed2mdir -max-entries $max . > title.tmp
mv title.tmp title
-echo "$hash_their" > parse.hash
+echo "$hash_their" >parse.hash
${=WGET} $wget_opts --output-file=warcs.log --warc-file=$dst/$fn $url
$FEEDER_WARC_COMPRESS $dst/$fn.warc
print $dst/$fn.warc*
- done < $new
+ done <$new
}
You can set that limit on a per-feed basis:
@example
-$ echo 50 > feeds/FEED/max
+$ echo 50 >feeds/FEED/max
@end example
@strong{0} means no limit and keeps all the messages.
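For example, assuming the same @file{feeds/FEED} layout as above, you
could explicitly keep a feed's complete history:
@example
$ echo 0 >feeds/FEED/max
@end example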
@example
$ mkdir -p feeds/my_first_feed/@{cur,new,tmp@}
-$ echo http://example.com/feed.atom > feeds/my_first_feed/url
+$ echo http://example.com/feed.atom >feeds/my_first_feed/url
@end example
Or you can convert a Newsboat @file{urls} file (containing many lines
with URLs) into a hierarchy of subdirectories with @command{urls2feeds}:
@example
-$ ./urls2feeds < ~/.newsboat/urls
+$ ./urls2feeds <~/.newsboat/urls
$ cat feeds/blog.stargrave.org_russian_feed.atom/url
http://blog.stargrave.org/russian/feed.atom
@end example
you can simply add them to a running @command{tofuproxy}:
@example
-$ for w (feeds/*/warcs/*.warc) print $w:a > path/to/tofuproxy/fifos/add-warcs
+$ for w (feeds/*/warcs/*.warc) print $w:a >path/to/tofuproxy/fifos/add-warcs
@end example
And then visit @url{http://warc/} URL (when @command{tofuproxy} already
cmds="$(dirname "$(realpath -- "$0")")"/cmd
muttrc_their="$($cmds/muttrc-gen)"
[ -r mutt.rc ] && muttrc_our="$(cat mutt.rc)" || :
-[ "$muttrc_our" = "$muttrc_their" ] || cat > mutt.rc <<EOF
+[ "$muttrc_our" = "$muttrc_their" ] || cat >mutt.rc <<EOF
$muttrc_their
EOF
MUHOME="$PWD/mu" mutt -e "source mutt.rc" -y
while read url ; do
read dir
mkdir -p -- "$dir"/cur "$dir"/new "$dir"/tmp # make it maildir
- echo "$url" > "$dir"/url
+ echo "$url" >"$dir"/url
printf "%s\n" "$dir"
done
seen[$dir]=1
[[ -e $dir ]] && continue || :
mkdir -p $dir/{cur,new,tmp} # make it maildir
- print -- "$url" > $dir/url
+ print -- "$url" >$dir/url
print $dir
done
for dir (feeds/*) [[ $seen[$dir] -eq 1 ]] || print disappeared: $dir