VERSION | 2 +- doc/building.texi | 20 ++++++++++---------- doc/bundles.texi | 8 ++++---- doc/cfg.texi | 13 +++++++++---- doc/cmds.texi | 104 +++++++++++++++++++++++++++-------------------------- doc/download.texi | 5 +++++ doc/index.texi | 2 ++ doc/integration.texi | 126 ++++++++++++++++++++++++++++++++--------------------- doc/integrity.texi | 11 ++++++----- doc/news.ru.texi | 20 ++++++++++++++++++++ doc/news.texi | 18 ++++++++++++++++++ doc/sources.texi | 4 ++-- doc/sp.texi | 9 +++++++++ doc/spool.texi | 4 ++-- doc/usecases.ru.texi | 40 +++++++++++++++++++++------------------- doc/usecases.texi | 40 +++++++++++++++++++++------------------- ports/nncp/Makefile | 2 +- src/call.go | 13 ++++++------- src/cfg.go | 7 +++++++ src/check.go | 25 +++++++++++++------------ src/cmd/nncp-bundle/main.go | 56 ++++++++++++++++++++++++++++++++++++++--------------- src/cmd/nncp-call/main.go | 12 +++++++++++- src/cmd/nncp-caller/main.go | 18 ++++++++++++++---- src/cmd/nncp-cfgmin/main.go | 2 +- src/cmd/nncp-cfgnew/main.go | 2 ++ src/cmd/nncp-check/main.go | 12 +++++++++++- src/cmd/nncp-daemon/main.go | 25 +++++++++++++++++-------- src/cmd/nncp-exec/main.go | 12 +++++++++++- src/cmd/nncp-file/main.go | 12 +++++++++++- src/cmd/nncp-freq/main.go | 12 +++++++++++- src/cmd/nncp-log/main.go | 2 +- src/cmd/nncp-pkt/main.go | 2 +- src/cmd/nncp-reass/main.go | 74 +++++++++++++++++++++++++++++++++-------------------- src/cmd/nncp-rm/main.go | 80 ++++++++++++++++++++++++++++------------------------- src/cmd/nncp-stat/main.go | 2 +- src/cmd/nncp-toss/main.go | 12 +++++++++++- src/cmd/nncp-xfer/main.go | 91 +++++++++++++++++++++++++++++++++++------------------ src/ctx.go | 21 ++++++++++++++++++--- src/humanizer.go | 10 +++++----- src/jobs.go | 5 ++--- src/lockdir.go | 4 ++-- src/log.go | 18 ++++++++++-------- src/progress.go | 151 +++++++++++++++++++++++++++++++++++++++++++++++++++++ src/sp.go | 164 ++++++++++++++++++++++++++--------------------------- src/toss.go | 64 
++++++++++++++++++++++++++++------------------------- src/tx.go | 68 ++++++++++++++++++++++++++++++----------------------- src/tx_test.go | 1 + src/uilive/LICENSE | 10 ++++++++++ src/uilive/README.md | 31 +++++++++++++++++++++++++++++++ src/uilive/doc.go | 2 ++ src/uilive/terminal_size.go | 37 +++++++++++++++++++++++++++++++++++++ src/uilive/writer.go | 144 +++++++++++++++++++++++++++++++++++++++++++++++++++++ diff --git a/VERSION b/VERSION index 9871b3c67fa0b9d06b62dc407cd355e0a52c7e10ff11d9c8b206bc1cddb05392..7ce8e3f89092f52ed660586747098cd88e6fcd428f21e17d94ac983ae93279de 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -5.1.2 +5.2.0 diff --git a/doc/building.texi b/doc/building.texi index 091105605015347b3e9752aad2cd609a681fb3ab61b406ca6f691a5d4f1f00cf..a8d3ae2b87ac90ea9c7190d4dc5428b73ce106ff9bb3b9185589a8f2a9e9ac41 100644 --- a/doc/building.texi +++ b/doc/building.texi @@ -10,17 +10,17 @@ @item Debian, Ubuntu @verb{|apt install golang|} @end table -@verbatim -$ [fetch|wget] http://www.nncpgo.org/download/nncp-5.1.2.tar.xz -$ [fetch|wget] http://www.nncpgo.org/download/nncp-5.1.2.tar.xz.sig -$ gpg --verify nncp-5.1.2.tar.xz.sig nncp-5.1.2.tar.xz -$ xz --decompress --stdout nncp-5.1.2.tar.xz | tar xf - -$ make -C nncp-5.1.2 all -@end verbatim +@example +$ [fetch|wget] http://www.nncpgo.org/download/nncp-@value{VERSION}.tar.xz +$ [fetch|wget] http://www.nncpgo.org/download/nncp-@value{VERSION}.tar.xz.sig +$ gpg --verify nncp-@value{VERSION}.tar.xz.sig nncp-@value{VERSION}.tar.xz +$ xz --decompress --stdout nncp-@value{VERSION}.tar.xz | tar xf - +$ make -C nncp-@value{VERSION} all +@end example There is @command{install} make-target respecting @env{DESTDIR}. 
It will install binaries and info-documentation: -@verbatim -# make -C nncp-5.1.2 install PREFIX=/usr/local -@end verbatim +@example +# make -C nncp-@value{VERSION} install PREFIX=/usr/local +@end example diff --git a/doc/bundles.texi b/doc/bundles.texi index ab567157b65c49e1e986413330422cb3a662d50839e51f98de6dc5b60d08cbf0..8ded7fd2d229f180dea380e649eb098b67d0e2beeae953e98a67440dbe422d1c 100644 --- a/doc/bundles.texi +++ b/doc/bundles.texi @@ -17,23 +17,23 @@ @itemize @item They do not require intermediate storage before recording on either CD-ROM or tape drive. -@verbatim +@example $ nncp-bundle -tx SOMENODE | cdrecord -tao - # record directly to CD $ nncp-bundle -tx SOMENODE | dd of=/dev/sa0 bs=10240 # record directly to tape $ dd if=/dev/cd0 bs=2048 | nncp-bundle -rx # read directly from CD $ dd if=/dev/sa0 bs=10240 | nncp-bundle -rx # read directly from tape -@end verbatim +@end example @item They do not require filesystem existence to deal with, simplifying administration when operating in heterogeneous systems with varying filesystems. No @command{mount}/@command{umount}, @command{zpool import}/@command{zpool export} and struggling with file permissions. -@verbatim +@example $ nncp-bundle -tx SOMENODE | dd of=/dev/da0 bs=1M # record directly to # hard/flash drive $ dd if=/dev/da0 bs=1M | nncp-bundle -rx # read directly from drive -@end verbatim +@end example @item This is the fastest way to record outbound packets for offline transmission -- sequential write is always faster, when no diff --git a/doc/cfg.texi b/doc/cfg.texi index 93b4af969a155b63690586055809074e9de149b08c2d10c850323e0a9344006f..664fabfaad70e16ed5157aebcbc77abab8497854c3b64b87a66b4d0162961439 100644 --- a/doc/cfg.texi +++ b/doc/cfg.texi @@ -8,6 +8,7 @@ { spool: /var/spool/nncp log: /var/spool/nncp/log umask: "022" + noprogress: true notify: { file: { @@ -98,6 +99,10 @@ Non-empty optional @strong{umask} will force all invoked commands to override their umask to specified octal mask. 
Useful for using with @ref{Shared spool, shared spool directories}. +Enabled @strong{noprogress} option disabled progress showing for many +commands by default. You can always force its showing with +@option{-progress} command line option anyway. + @anchor{CfgNotify} @strong{notify} section contains notification settings for successfully tossed file, freq and exec packets. Corresponding @strong{from} and @@ -143,12 +148,12 @@ @verb{|sendmail: ["/usr/sbin/sendmail", "-t"]|} handle, when called by @verb{|echo hello world | nncp-exec OURNODE sendmail ARG0 ARG1 ARG2|} command, will execute: -@verbatim +@example NNCP_SELF=OURNODE \ NNCP_SENDER=REMOTE \ NNCP_NICE=64 \ /usr/sbin/sendmail -t ARG0 ARG1 ARG2 -@end verbatim +@end example feeding @verb{|hello world\n|} to that started @command{sendmail} process. @@ -230,9 +235,9 @@ users, then you can @command{setgid} it and assure that umask is group friendly. For convenience you can set @option{umask} globally for invoked NNCP commands in the configuration file. For example: -@verbatim +@example $ chgrp nncp /usr/local/etc/nncp.hjson /var/spool/nncp $ chmod g+r /usr/local/etc/nncp.hjson $ chmod g+rwxs /var/spool/nncp $ echo 'umask: "007"' >> /usr/local/etc/nncp.hjson -@end verbatim +@end example diff --git a/doc/cmds.texi b/doc/cmds.texi index 8a5a1b73f4f2051d0fea3eca1426e28e6e05a6d6760063cccd1c1a80b3fb2d33..d3dd19ef332c8a846570512615064f0f8ceff3bcdb81ef40db8acd62ec8cb442 100644 --- a/doc/cmds.texi +++ b/doc/cmds.texi @@ -34,6 +34,8 @@ @item -quiet Print only errors, omit simple informational messages. In any case those messages are logged, so you can reread them using @ref{nncp-log} command. +@item -progress, -noprogress + Either force progress showing, or disable it. @item -version Print version information. @item -warranty @@ -43,11 +45,11 @@ @node nncp-bundle @section nncp-bundle -@verbatim +@example $ nncp-bundle [options] -tx [-delete] NODE [NODE ...] > ... $ nncp-bundle [options] -rx -delete [-dryrun] [NODE ...] < ... 
$ nncp-bundle [options] -rx [-check] [-dryrun] [NODE ...] < ... -@end verbatim +@end example With @option{-tx} option, this command creates @ref{Bundles, bundle} of @ref{Encrypted, encrypted packets} from the spool directory and writes @@ -78,10 +80,10 @@ with @option{-rx} and @option{-delete} options -- in that mode, stream packets integrity will be checked and they will be deleted from the spool if everything is good. So it is advisable to recheck your streams: -@verbatim +@example $ nncp-bundle -tx ALICE BOB WHATEVER | cdrecord -tao - $ dd if=/dev/cd0 bs=2048 | nncp-bundle -rx -delete -@end verbatim +@end example @option{-dryrun} option prevents any writes to the spool. This is useful when you need to see what packets will pass by and possibly check @@ -90,7 +92,7 @@ @node nncp-call @section nncp-call -@verbatim +@example $ nncp-call [options] [-onlinedeadline INT] [-maxonlinetime INT] @@ -100,7 +102,7 @@ [-pkts PKT,PKT,...] [-rxrate INT] [-txrate INT] NODE[:ADDR] [FORCEADDR] -@end verbatim +@end example Call (connect to) specified @option{NODE} and run @ref{Sync, synchronization} protocol with the @ref{nncp-daemon, daemon} on the @@ -139,9 +141,9 @@ @node nncp-caller @section nncp-caller -@verbatim +@example $ nncp-caller [options] [NODE ...] -@end verbatim +@end example Croned daemon that calls remote nodes from time to time, according to their @ref{CfgCalls, @emph{calls}} configuration field. @@ -155,10 +157,10 @@ @node nncp-cfgenc @section nncp-cfgenc -@verbatim +@example $ nncp-cfgmin [options] [-s INT] [-t INT] [-p INT] cfg.hjson > cfg.hjson.eblob $ nncp-cfgmin [options] -d cfg.hjson.eblob > cfg.hjson -@end verbatim +@end example This command allows you to encrypt provided @file{cfg.hjson} file with the passphrase, producing @ref{EBlob, eblob}, to safely keep your @@ -183,21 +185,21 @@ if passphrase can not decrypt @file{eblob}. @option{-dump} options parses @file{eblob} and prints parameters used during its creation. 
For example: -@verbatim +@example $ nncp-cfgenc -dump /usr/local/etc/nncp.hjson.eblob Strengthening function: Balloon with BLAKE2b-256 Memory space cost: 1048576 bytes Number of rounds: 16 Number of parallel jobs: 2 Blob size: 2494 -@end verbatim +@end example @node nncp-cfgmin @section nncp-cfgmin -@verbatim +@example $ nncp-cfgmin [options] > stripped.hjson -@end verbatim +@end example Print out stripped configuration version: only path to @ref{Spool, spool}, path to log file, neighbours public keys are stayed. This is @@ -207,9 +209,9 @@ @node nncp-cfgnew @section nncp-cfgnew -@verbatim +@example $ nncp-cfgnew [options] [-nocomments] > new.hjson -@end verbatim +@end example Generate new node configuration: private keys, example configuration file and print it to stdout. You must use this command when you setup @@ -222,9 +224,9 @@ @node nncp-check @section nncp-check -@verbatim +@example $ nncp-check [options] -@end verbatim +@end example Perform @ref{Spool, spool} directory integrity check. Read all files that has Base32-encoded filenames and compare it with recalculated @@ -234,9 +236,9 @@ @node nncp-daemon @section nncp-daemon -@verbatim +@example $ nncp-daemon [options] [-maxconn INT] [-bind ADDR] [-inetd] -@end verbatim +@end example Start listening TCP daemon, wait for incoming connections and run @ref{Sync, synchronization protocol} with each of them. You can run @@ -257,9 +259,9 @@ @node nncp-exec @section nncp-exec -@verbatim +@example $ nncp-exec [options] NODE HANDLE [ARG0 ARG1 ...] -@end verbatim +@end example Send execution command to @option{NODE} for specified @option{HANDLE}. Body is read from stdin and compressed. 
After receiving, remote side @@ -279,13 +281,13 @@ then executing @verb{|echo My message | nncp-exec -replynice 123 REMOTE sendmail root@localhost|} will lead to execution of: -@verbatim +@example echo My message | NNCP_SELF=REMOTE \ NNCP_SENDER=OurNodeId \ NNCP_NICE=123 \ - /usr/sbin/sendmail -t root@localhost -@end verbatim + /usr/sbin/sendmail -t root@@localhost +@end example If @ref{CfgNotify, notification} is enabled on the remote side for exec handles, then it will sent simple letter after successful command @@ -294,9 +296,9 @@ @node nncp-file @section nncp-file -@verbatim +@example $ nncp-file [options] [-chunked INT] SRC NODE:[DST] -@end verbatim +@end example Send @file{SRC} file to remote @option{NODE}. @file{DST} specifies destination file name in remote's @ref{CfgIncoming, incoming} @@ -342,9 +344,9 @@ @node nncp-freq @section nncp-freq -@verbatim +@example $ nncp-freq [options] NODE:SRC [DST] -@end verbatim +@end example Send file request to @option{NODE}, asking it to send its @file{SRC} file from @ref{CfgFreq, freq.path} directory to our node under @file{DST} @@ -358,45 +360,45 @@ @node nncp-log @section nncp-log -@verbatim +@example $ nncp-log [options] -@end verbatim +@end example Parse @ref{Log, log} file and print out its records in human-readable form. @node nncp-pkt @section nncp-pkt -@verbatim +@example $ nncp-pkt [options] < pkt $ nncp-pkt [options] [-decompress] -dump < pkt > payload $ nncp-pkt -overheads -@end verbatim +@end example Low level packet parser. Normally it should not be used, but can help in debugging. By default it will print packet's type, for example: -@verbatim +@example Packet type: encrypted Niceness: 64 Sender: 2WHBV3TPZHDOZGUJEH563ZEK7M33J4UESRFO4PDKWD5KZNPROABQ -@end verbatim +@end example If you specify @option{-dump} option and provide an @ref{Encrypted, encrypted} packet, then it will verify and decrypt it to stdout. 
Encrypted packets contain @ref{Plain, plain} ones, that also can be fed to @command{nncp-pkt}: -@verbatim +@example Packet type: plain Payload type: transitional Path: VHMTRWDOXPLK7BR55ICZ5N32ZJUMRKZEMFNGGCEAXV66GG43PEBQ Packet type: plain Payload type: mail -Path: stargrave@stargrave.org -@end verbatim +Path: stargrave@@stargrave.org +@end example And with the @option{-dump} option it will give you the actual payload (the whole file, mail message, and so on). @option{-decompress} option @@ -408,10 +410,10 @@ @node nncp-reass @section nncp-reass -@verbatim +@example $ nncp-reass [options] [-dryrun] [-keep] [-dump] [-stdout] FILE.nncp.meta -$ nncp-reass [options] [-dryrun] [-keep] {-all | -node NODE} -@end verbatim +$ nncp-reass [options] [-dryrun] [-keep] @{-all | -node NODE@} +@end example Reassemble @ref{Chunked, chunked file} after @ref{nncp-toss, tossing}. @@ -450,7 +452,7 @@ and/or separate storage device for higher performance. @option{-dump} option prints meta-file contents in human-friendly form. It is useful mainly for debugging purposes. For example: -@verbatim +@example Original filename: testfile File size: 3.8 MiB (3987795 bytes) Chunk size: 1.0 MiB (1048576 bytes) @@ -460,21 +462,21 @@ 0: eac60d819edf40b8ecdacd0b9a5a8c62de2d15eef3c8ca719eafa0be9b894017 1: 013a07e659f2e353d0e4339c3375c96c7fffaa2fa00875635f440bbc4631052a 2: f4f883975a663f2252328707a30e71b2678f933b2f3103db8475b03293e4316e 3: 0e9e229501bf0ca42d4aa07393d19406d40b179f3922a3986ef12b41019b45a3 -@end verbatim +@end example Do not forget about @ref{ChunkedZFS, possible} ZFS deduplication issues. 
@node nncp-rm @section nncp-rm -@verbatim +@example $ nncp-rm [options] -tmp $ nncp-rm [options] -lock $ nncp-rm [options] -node NODE -part $ nncp-rm [options] -node NODE -seen $ nncp-rm [options] -node NODE [-rx] [-tx] $ nncp-rm [options] -node NODE -pkt PKT -@end verbatim +@end example This command is aimed to delete various files from your spool directory: @@ -497,9 +499,9 @@ @node nncp-stat @section nncp-stat -@verbatim +@example $ nncp-stat [options] [-node NODE] -@end verbatim +@end example Print current @ref{Spool, spool} statistics about unsent and unprocessed packets. For each node (unless @option{-node} specified) and each @@ -509,7 +511,7 @@ @node nncp-toss @section nncp-toss -@verbatim +@example $ nncp-toss [options] [-node NODE] [-dryrun] @@ -519,7 +521,7 @@ [-nofile] [-nofreq] [-noexec] [-notrns] -@end verbatim +@end example Perform "tossing" operation on all inbound packets. This is the tool that decrypts all packets and processes all payload packets in them: @@ -545,9 +547,9 @@ @node nncp-xfer @section nncp-xfer -@verbatim +@example $ nncp-xfer [options] [-node NODE] [-mkdir] [-keep] [-rx|-tx] DIR -@end verbatim +@end example Search for directory in @file{DIR} containing inbound packets for us and move them to local @ref{Spool, spool} directory. 
Also search for known diff --git a/doc/download.texi b/doc/download.texi index ea4e122f703d4ad9ab0639a5827aece8cbc143bdb3f2de5bf36ab5e826dc1691..ba484b574fe64d5f5536fbe192cea0c4d539fede8c6f9eccf594158b896f0623 100644 --- a/doc/download.texi +++ b/doc/download.texi @@ -12,6 +12,7 @@ @item @code{github.com/davecgh/go-xdr} @tab ISC @item @code{github.com/dustin/go-humanize} @tab MIT @item @code{github.com/flynn/noise} @tab BSD 3-Clause @item @code{github.com/gorhill/cronexpr} @tab GNU GPLv3 +@item @code{github.com/gosuri/uilive} @tab MIT @item @code{github.com/hjson/hjson-go} @tab MIT @item @code{github.com/klauspost/compress} @tab BSD 3-Clause @item @code{go.cypherpunks.ru/balloon} @tab GNU LGPLv3 @@ -22,6 +23,10 @@ @end multitable @multitable {XXXXX} {XXXX-XX-XX} {XXXX KiB} {link sign} {xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx} @headitem Version @tab Date @tab Size @tab Tarball @tab SHA256 checksum + +@item @ref{Release 5.1.2, 5.1.2} @tab 2019-12-13 @tab 1106 KiB +@tab @url{download/nncp-5.1.2.tar.xz, link} @url{download/nncp-5.1.2.tar.xz.sig, sign} +@tab @code{52B2043B 1B22D20F C44698EC AFE5FF46 F99B4DD5 2C392D4D 25FE1580 993263B3} @item @ref{Release 5.1.1, 5.1.1} @tab 2019-12-01 @tab 1103 KiB @tab @url{download/nncp-5.1.1.tar.xz, link} @url{download/nncp-5.1.1.tar.xz.sig, sign} diff --git a/doc/index.texi b/doc/index.texi index 32ce8a71a230eaf4fce048c86449335e2fb34019c859c8c016b09c41f23c5905..01adc44c0fcd94a8be8382020fb55f04c04486df66be6cfbb1b75d6a4d5ec8b0 100644 --- a/doc/index.texi +++ b/doc/index.texi @@ -2,6 +2,8 @@ \input texinfo @documentencoding UTF-8 @settitle NNCP +@set VERSION 5.2.0 + @copying This manual is for NNCP (Node to Node copy) -- collection of utilities simplifying secure store-and-forward files and mail exchanging. 
diff --git a/doc/integration.texi b/doc/integration.texi index 836d1d58efe07b8d981326e9114d2a5b26626a95da1fb761a1368062453f67c6..a4abbf5ba9f25bf0c9a7e8241b5f6e15892320e8bbc33151655ab9e25249e822 100644 --- a/doc/integration.texi +++ b/doc/integration.texi @@ -23,13 +23,13 @@ want to freq from. Because files can be updated there. It is useful to run cron-ed job on it to create files listing you can freq and search for files in it: -@verbatim +@example 0 4 * * * cd /storage ; tmp=`mktemp` ; \ tree -f -h -N --du --timefmt \%Y-\%m-\%d | zstdmt -19 > $tmp && chmod 644 $tmp && mv $tmp TREE.txt.zst ; \ tree -J -f --timefmt \%Y-\%m-\%d | zstdmt -19 > $tmp && chmod 644 $tmp && mv $tmp TREE.json.zst -@end verbatim +@end example @node Postfix @section Integration with Postfix @@ -51,11 +51,11 @@ the Postfix @command{sendmail} command. @item Define a @command{pipe(8)} based mail delivery transport for delivery via NNCP: -@verbatim +@example /usr/local/etc/postfix/master.cf: nncp unix - n n - - pipe flags=F user=nncp argv=nncp-exec -quiet $nexthop sendmail $recipient -@end verbatim +@end example This runs the @command{nncp-exec} command to place outgoing mail into the NNCP queue after replacing @var{$nexthop} by the the receiving NNCP @@ -67,11 +67,11 @@ @item Specify that mail for @emph{example.com}, should be delivered via NNCP, to a host named @emph{nncp-host}: -@verbatim +@example /usr/local/etc/postfix/transport: example.com nncp:nncp-host .example.com nncp:nncp-host -@end verbatim +@end example See the @command{transport(5)} manual page for more details. @@ -80,18 +80,18 @@ whenever you change the @file{transport} file. @item Enable @file{transport} table lookups: -@verbatim +@example /usr/local/etc/postfix/main.cf: transport_maps = hash:$config_directory/transport -@end verbatim +@end example @item Add @emph{example.com} to the list of domains that your site is willing to relay mail for. 
-@verbatim +@example /usr/local/etc/postfix/main.cf: relay_domains = example.com ...other relay domains... -@end verbatim +@end example See the @option{relay_domains} configuration parameter description for details. @@ -114,27 +114,27 @@ @item Specify that all remote mail must be sent via the @command{nncp} mail transport to your NNCP gateway host, say, @emph{nncp-gateway}: -@verbatim +@example /usr/local/etc/postfix/main.cf: relayhost = nncp-gateway default_transport = nncp -@end verbatim +@end example Postfix 2.0 and later also allows the following more succinct form: -@verbatim +@example /usr/local/etc/postfix/main.cf: default_transport = nncp:nncp-gateway -@end verbatim +@end example @item Define a @command{pipe(8)} based message delivery transport for mail delivery via NNCP: -@verbatim +@example /usr/local/etc/postfix/master.cf: nncp unix - n n - - pipe flags=F user=nncp argv=nncp-exec -quiet $nexthop sendmail $recipient -@end verbatim +@end example This runs the @command{nncp-exec} command to place outgoing mail into the NNCP queue. It substitutes the hostname (@emph{nncp-gateway}, or @@ -159,33 +159,40 @@ program supports ETags and won't pollute the channel if remote server supports them too. After installing @command{rss2email}, create configuration file: -@verbatim -$ r2e new rss-robot@address.com -@end verbatim + +@example +$ r2e new rss-robot@@address.com +@end example + and add feeds you want to retrieve: -@verbatim + +@example $ r2e add https://git.cypherpunks.ru/cgit.cgi/nncp.git/atom/?h=master -@end verbatim +@end example + and run the process: -@verbatim + +@example $ r2e run -@end verbatim +@end example @node WARCs @section Integration with Web pages Simple HTML web page can be downloaded very easily for sending and viewing it offline after: -@verbatim + +@example $ wget http://www.example.com/page.html -@end verbatim +@end example But most web pages contain links to images, CSS and JavaScript files, required for complete rendering. 
@url{https://www.gnu.org/software/wget/, GNU Wget} supports that documents parsing and understanding page dependencies. You can download the whole page with dependencies the following way: -@verbatim + +@example $ wget \ --page-requisites \ --convert-links \ @@ -195,19 +202,22 @@ --span-hosts \ --random-wait \ --execute robots=off \ http://www.example.com/page.html -@end verbatim +@end example + that will create @file{www.example.com} directory with all files necessary to view @file{page.html} web page. You can create single file compressed tarball with that directory and send it to remote node: -@verbatim + +@example $ tar cf - www.example.com | zstd | nncp-file - remote.node:www.example.com-page.tar.zst -@end verbatim +@end example But there are multi-paged articles, there are the whole interesting sites you want to get in a single package. You can mirror the whole web site by utilizing @command{wget}'s recursive feature: -@verbatim + +@example $ wget \ --recursive \ --timestamping \ @@ -216,20 +226,22 @@ --no-remove-listing \ --no-parent \ [...] http://www.example.com/ -@end verbatim +@end example There is a standard for creating @url{https://en.wikipedia.org/wiki/Web_ARChive, Web ARChives}: @strong{WARC}. Fortunately again, @command{wget} supports it as an output format. -@verbatim + +@example $ wget \ --warc-file www.example_com-$(date '+%Y%M%d%H%m%S') \ --no-warc-compression \ --no-warc-keep-log \ [...] http://www.example.com/ -@end verbatim +@end example + That command will create uncompressed @file{www.example_com-XXX.warc} web archive. By default, WARCs are compressed using @url{https://en.wikipedia.org/wiki/Gzip, gzip}, but, in example above, @@ -241,12 +253,13 @@ There are plenty of software acting like HTTP proxy for your browser, allowing to view that WARC files. 
However you can extract files from that archive using @url{https://pypi.python.org/pypi/Warcat, warcat} utility, producing usual directory hierarchy: -@verbatim + +@example $ python3 -m warcat extract \ www.example_com-XXX.warc \ --output-dir www.example.com-XXX \ --progress -@end verbatim +@end example @node BitTorrent @section BitTorrent and huge files @@ -267,12 +280,14 @@ connections. You can queue you files after they are completely downloaded. @file{aria2-downloaded.sh} contents: + @verbatiminclude aria2-downloaded.sh Also you can prepare @url{http://aria2.github.io/manual/en/html/aria2c.html#files, input file} with the jobs you want to download: -@verbatim + +@example $ cat jobs http://www.nncpgo.org/download/nncp-0.11.tar.xz out=nncp.txz @@ -281,7 +296,8 @@ out=nncp.txz.sig $ aria2c \ --on-download-complete aria2-downloaded.sh \ --input-file jobs -@end verbatim +@end example + and all that downloaded (@file{nncp.txz}, @file{nncp.txz.sig}) files will be sent to @file{remote.node} when finished. @@ -305,15 +321,17 @@ } @end verbatim @file{warcer.sh} contents: + @verbatiminclude warcer.sh @file{wgeter.sh} contents: + @verbatiminclude wgeter.sh Now you can queue that node to send you some website's page, file or BitTorrents: -@verbatim +@example $ echo http://www.nncpgo.org/Postfix.html | nncp-exec remote.node warcer postfix-whole-page $ echo http://www.nncpgo.org/Postfix.html | @@ -322,7 +340,7 @@ $ echo \ http://www.nncpgo.org/download/nncp-0.11.tar.xz http://www.nncpgo.org/download/nncp-0.11.tar.xz.sig | nncp-exec remote.node aria2c -@end verbatim +@end example @node Git @section Integration with Git @@ -333,32 +351,37 @@ @url{https://git-scm.com/docs/git-bundle, git-bundle} command is everything you need. 
Use it to create bundles containing all required blobs/trees/commits and tags: -@verbatim + +@example $ git bundle create repo-initial.bundle master --tags --branches $ git tag -f last-bundle $ nncp-file repo-initial.bundle remote.node:repo-$(date % '+%Y%M%d%H%m%S').bundle -@end verbatim +@end example Do usual working with the Git: commit, add, branch, checkout, etc. When you decide to queue your changes for sending, create diff-ed bundle and transfer them: -@verbatim + +@example $ git bundle create repo-$(date '+%Y%M%d%H%m%S').bundle last-bundle..master or maybe $ git bundle create repo-$(date '+%Y%M%d').bundle --since=10.days master -@end verbatim +@end example Received bundle on remote machine acts like usual remote: -@verbatim + +@example $ git clone -b master repo-XXX.bundle -@end verbatim +@end example + overwrite @file{repo.bundle} file with newer bundles you retrieve and fetch all required branches and commits: -@verbatim + +@example $ git pull # assuming that origin remote points to repo.bundle $ git fetch repo.bundle master:localRef $ git ls-remote repo.bundle -@end verbatim +@end example Bundles are also useful when cloning huge repositories (like Linux has). Git's native protocol does not support any kind of interrupted download @@ -369,7 +392,9 @@ bundle, you can add an ordinary @file{git://} remote and fetch the difference. Also you can find the following exec-handler useful: + @verbatiminclude git-bundler.sh + And it allows you to request for bundles like that: @code{echo some-old-commit..master | nncp-exec REMOTE bundler REPONAME}. @@ -383,8 +408,9 @@ many of them are supported, including @emph{Dailymotion}, @emph{Vimeo} and @emph{YouTube}. When you multimedia becomes an ordinary file, you can transfer it easily. 
-@verbatim + +@example $ youtube-dl \ - --exec 'nncp-file {} remote.node:' \ + --exec 'nncp-file @{@} remote.node:' \ 'https://www.youtube.com/watch?list=PLd2Cw8x5CytxPAEBwzilrhQUHt_UN10FJ' -@end verbatim +@end example diff --git a/doc/integrity.texi b/doc/integrity.texi index 5458e3f57837f74e64c07d1c7c739ef7287a702e58abfb57b9423a5c029c88cd..d2e33f0efef8ab90c514ab68d9a487d58b9edf38a72914ab87d18f04768a6f04 100644 --- a/doc/integrity.texi +++ b/doc/integrity.texi @@ -19,10 +19,10 @@ @itemize @item -@verbatim +@example $ gpg --auto-key-locate dane --locate-keys releases at nncpgo dot org $ gpg --auto-key-locate wkd --locate-keys releases at nncpgo dot org -@end verbatim +@end example @item @verbatiminclude .well-known/openpgpkey/hu/i4cdqgcarfjdjnba6y4jnf498asg8c6p.asc @@ -30,6 +30,7 @@ @end itemize Then you could verify tarballs signature: -@verbatim -$ gpg --verify nncp-5.1.2.tar.xz.sig nncp-5.1.2.tar.xz -@end verbatim + +@example +$ gpg --verify nncp-@value{VERSION}.tar.xz.sig nncp-@value{VERSION}.tar.xz +@end example diff --git a/doc/news.ru.texi b/doc/news.ru.texi index e0b0dadb5a52c123c908b39c713528a5bb33f3bd14fe849f8a396aea5762f76d..d79075844a69ddea04a9a2265b0fe09fb1d73b6d5e2bcff95b1661203bbecd63 100644 --- a/doc/news.ru.texi +++ b/doc/news.ru.texi @@ -1,6 +1,26 @@ @node Новости @section Новости +@node Релиз 5.2.0 +@subsection Релиз 5.2.0 +@itemize + +@item +Большинство команд по умолчанию показывают однострочный прогресс +выполнения операции. Появились @option{-progress}, @option{-noprogress} +опции командной строки, @option{noprogress} опция конфигурационного +файла. + +@item +Исправлен некорректный код возврата @command{nncp-check} команды, +который возвращал ошибку когда всё хорошо. + +@item +Проверка свободного места для пакетов, во время выполнения +@command{nncp-bundle -rx}. 
+ +@end itemize + @node Релиз 5.1.2 @subsection Релиз 5.1.2 @itemize diff --git a/doc/news.texi b/doc/news.texi index 052be070bc6b436b9dc76bbfed42c85b47a35d8e3376452f163df7096dfd6c3b..57f8311a4e2afd10d1a73e8df024fb71b415f6d4aa9cf4e4f856ad084ba9490b 100644 --- a/doc/news.texi +++ b/doc/news.texi @@ -3,6 +3,24 @@ @unnumbered News See also this page @ref{Новости, on russian}. +@node Release 5.2.0 +@section Release 5.2.0 +@itemize + +@item +Most commands by default show oneline operations progress. +@option{-progress}, @option{-noprogress} command line options, +@option{noprogress} configuration file option appeared. + +@item +Fixed incorrect @command{nncp-check} command return code, that returned +bad code when everything is good. + +@item +Free disk space check during @command{nncp-bundle -rx} call. + +@end itemize + @node Release 5.1.2 @section Release 5.1.2 @itemize diff --git a/doc/sources.texi b/doc/sources.texi index bb7c7ad014fac4967e92574f217d0401885bea5ab86a11cd27431c6ed7220f20..ad02cef5dfe332f97cd061cfab97b7bbaebdea82d8b0d55fbdd4739321acb208 100644 --- a/doc/sources.texi +++ b/doc/sources.texi @@ -6,11 +6,11 @@ be buggy. It does not contain compiled documentation and dependent libraries source code. Because of that, it is recommended for porters to use @ref{Tarballs, tarballs} instead. -@verbatim +@example $ git clone git://git.cypherpunks.ru/nncp.git $ cd nncp $ git checkout develop -@end verbatim +@end example Also there is mirror of dependent libraries for safety if their native repositories will be unavailable (they are seldom updated): diff --git a/doc/sp.texi b/doc/sp.texi index ce8427eb7bc7cefd815b7001dd20c8c26482689a60e153ad1cf74ef4f72f67a7..7c99c4c96dd9d1f749c017e88d4be6ea6192bdf4dccfe0c06c0280c7c778f641 100644 --- a/doc/sp.texi +++ b/doc/sp.texi @@ -54,6 +54,7 @@ Stop file transmission, empty sending queue on the remote side. Actually @emph{HALT} packet does not have any body, only the header with the type. 
It is also used in the first payload for padding to the maximum size. + @verbatim +------+ | HALT | @@ -62,11 +63,13 @@ @end verbatim @item INFO Information about the file we have for transmission. + @verbatim +------+--------------------+ | INFO | NICE | SIZE | HASH | +------+--------------------+ @end verbatim + @multitable @columnfractions 0.2 0.3 0.5 @headitem @tab XDR type @tab Value @item Niceness @tab @@ -83,11 +86,13 @@ @item FREQ File transmission request. Ask remote side to queue the file for transmission. + @verbatim +------+---------------+ | FREQ | HASH | OFFSET | +------+---------------+ @end verbatim + @multitable @columnfractions 0.2 0.3 0.5 @headitem @tab XDR type @tab Value @item Hash @tab @@ -100,11 +105,13 @@ @end multitable @item FILE Chunk of file. + @verbatim +------+-------------------------+ | FILE | HASH | OFFSET | PAYLOAD | +------+-------------------------+ @end verbatim + @multitable @columnfractions 0.2 0.3 0.5 @headitem @tab XDR type @tab Value @item Hash @tab @@ -120,11 +127,13 @@ @end multitable @item DONE Signal remote side that we have successfully downloaded the file. + @verbatim +------+------+ | DONE | HASH | +------+------+ @end verbatim + @multitable @columnfractions 0.2 0.3 0.5 @headitem @tab XDR type @tab Value @item Hash @tab diff --git a/doc/spool.texi b/doc/spool.texi index 611c422f3cffbc0874ad16d6c9b4fe6f73894af5d104474d37551f4b6d6437b4..03951b8e46b62c6fc5898a59791260faa14aeb4819f7640e7d4420ce23fa173d 100644 --- a/doc/spool.texi +++ b/doc/spool.texi @@ -5,7 +5,7 @@ Spool directory holds @ref{Encrypted, encrypted packets} received from remote nodes and queued for sending to them. 
It has the following example structure: -@verbatim +@example spool/tmp/ spool/2WHB...OABQ/rx.lock spool/2WHB...OABQ/rx/5ZIB...UMKW.part @@ -17,7 +17,7 @@ spool/BYRR...CG6Q/tx/AQUT...DGNT.seen spool/BYRR...CG6Q/tx/NSYY...ZUU6 spool/BYRR...CG6Q/tx/VCSR...3VXX.seen spool/BYRR...CG6Q/tx/ZI5U...5RRQ -@end verbatim +@end example Except for @file{tmp}, all other directories are Base32-encoded node identifiers (@file{2WHB...OABQ}, @file{BYRR...CG6Q} in our example). diff --git a/doc/usecases.ru.texi b/doc/usecases.ru.texi index 7656e163dcf48e478dffdc1d7e925f03e1e27b684e00c5cd7d3a654fbcfdf3ca..a395e5d54d2e233491ceb9fd224ae9c2403bebca0968530f2ed5011917ef26ae 100644 --- a/doc/usecases.ru.texi +++ b/doc/usecases.ru.texi @@ -79,10 +79,10 @@ @ref{nncp-daemon, TCP демон}. Команды: -@verbatim +@example $ nncp-file file_i_want_to_send bob: $ nncp-file another_file bob:movie.avi -@end verbatim +@end example добавят в очередь отправки два файла для узла @emph{bob}. Выстрелил-и-забыл! Теперь это работа демона (или offline передачи) @@ -105,12 +105,12 @@ имеет уровень "приятности", который гарантирует что он будет обработан раньше или позднее остальных. Почти все команды имеют соответствующую опцию: -@verbatim +@example $ nncp-file -nice FLASH myfile node:dst $ nncp-xfer -nice PRIORITY /mnt/shared $ nncp-call -nice NORMAL bob [...] -@end verbatim +@end example Огромные файлы могут быть разбиты на маленькие @ref{Chunked, части}, давая возможность передачи, по сути, любых объёмов используя накопители @@ -118,10 +118,10 @@ небольших размеров. Вы также можете использовать CD-ROM и ленточные накопители: -@verbatim +@example $ nncp-bundle -tx bob | cdrecord -tao - $ nncp-bundle -tx bob | dd of=/dev/sa0 bs=10240 -@end verbatim +@end example @node UsecaseNoLinkRU @subsection Экстремальные наземные окружающие условия, нет связи @@ -135,9 +135,9 @@ Представьте, что вы послали два файла узлу @emph{bob}. Вставьте USB устройство (SD гораздо предпочтительнее!) 
хранения, подмонтируйте и запустите @ref{nncp-xfer}: -@verbatim +@example $ nncp-xfer -node bob /media/usbstick -@end verbatim +@end example чтобы скопировать все исходящие пакеты относящиеся к @emph{bob}. Используйте @option{-mkdir} опцию чтобы создать все необходимые @@ -148,16 +148,16 @@ Если вы используете один и тот же накопитель для передачи данных и к @emph{bob} и к @emph{alice}, то тогда просто не указывайте @option{-node} опцию, чтобы скопировать все доступные исходящие пакеты. -@verbatim +@example $ nncp-xfer /media/usbstick -@end verbatim +@end example Размонтируйте и передайте накопитель Бобу и Алисе. Когда они вставят накопитель в свои компьютеры, то выполнят точно такую же команду: -@verbatim +@example $ nncp-xfer /media/usbstick -@end verbatim +@end example чтобы найти все пакеты относящиеся к их узлу и локально скопируют для дальнейшей обработки. @command{nncp-xfer} это единственная команда @@ -175,10 +175,10 @@ Вы можете использовать, так называемые, @ref{Bundles, пачки} и потоково отсылать их. Они -- всего-лишь последовательность @ref{Encrypted, зашифрованных пакетов}, которые вы можете принять. -@verbatim +@example $ nncp-bundle -tx alice bob eve ... | команда для отправки широковещательной рассылки $ команда для приёма широковещательной рассылки | nncp-bundle -rx -@end verbatim +@end example Встроенная возможность определять дубляжи пакетов позволит вам переотправлять широковещательные рассылки время от времени, повышая @@ -243,15 +243,17 @@ аутентификации узлов и предоставляет эффективный (оба участника могут отослать полезную нагрузку сразу же в самом первом пакете) безопасный транспорт с свойством совершенной прямой секретности. -@verbatim -$ nncp-daemon -bind [::]:5400 -@end verbatim +@example +$ nncp-daemon -bind "[::]":5400 +@end example + запустит TCP демон, который будет слушать входящие соединения на всех интерфейсах. 
-@verbatim +@example $ nncp-call bob -@end verbatim +@end example + попытается подключиться к известному TCP-адресу узла @emph{bob} (взятого из конфигурационного файла), послать все связанные с ним исходящие пакеты и получить от него. Все прерванные передачи будут автоматически diff --git a/doc/usecases.texi b/doc/usecases.texi index 487ae5534b17d5806f23d336d8da790e5b8e5853926de80e46c8462e37074f3d..40365fc9e9eef386f8d0f27ab2a7836ca08a18d43838b552a6ad3d09244dba11 100644 --- a/doc/usecases.texi +++ b/doc/usecases.texi @@ -77,10 +77,10 @@ daemon}. The command: -@verbatim +@example $ nncp-file file_i_want_to_send bob: $ nncp-file another_file bob:movie.avi -@end verbatim +@end example will queue two files for sending to @emph{bob} node. Fire and forget! Now this is daemon's job (or offline transfer) to send this files part @@ -100,12 +100,12 @@ NNCP allows traffic @ref{Niceness, prioritizing}: each packet has niceness level, that will guarantee that it will be processed earlier or later than the other ones. Nearly all commands has corresponding option: -@verbatim +@example $ nncp-file -nice FLASH myfile node:dst $ nncp-xfer -nice PRIORITY /mnt/shared $ nncp-call -nice NORMAL bob [...] -@end verbatim +@end example Huge files could be split on smaller @ref{Chunked, chunks}, giving possibility to transfer virtually any volumes using small capacity @@ -113,10 +113,10 @@ storages. You can also use CD-ROM and tape drives: -@verbatim +@example $ nncp-bundle -tx bob | cdrecord -tao - $ nncp-bundle -tx bob | dd of=/dev/sa0 bs=10240 -@end verbatim +@end example @node UsecaseNoLink @section Extreme terrestrial environments, no link @@ -128,9 +128,9 @@ Assume that you send two files to @emph{bob} node. Insert USB storage device (SD is preferable!), mount it and run @ref{nncp-xfer}: -@verbatim +@example $ nncp-xfer -node bob /media/usbstick -@end verbatim +@end example to copy all outbound packets related to @emph{bob}. 
Use @option{-mkdir} option to create related directory on USB/SD storage if they are missing @@ -140,16 +140,16 @@ If you use single storage device to transfer data both to @emph{bob} and @emph{alice}, then just omit @option{-node} option to copy all available outgoing packets. -@verbatim +@example $ nncp-xfer /media/usbstick -@end verbatim +@end example Unmount it and transfer storage to Bob and Alice. When they will insert it in their computers, they will use exactly the same command: -@verbatim +@example $ nncp-xfer /media/usbstick -@end verbatim +@end example to find all packets related to their node and copy them locally for further processing. @command{nncp-xfer} is the only command used with @@ -165,10 +165,10 @@ You can use @ref{Bundles, bundles} and stream them above. They are just a sequence of @ref{Encrypted, encrypted packets} you can catch on. -@verbatim +@example $ nncp-bundle -tx alice bob eve ... | command to send broadcast $ command to receive broadcast | nncp-bundle -rx -@end verbatim +@end example With built-in packet duplicates detection ability, you can retransmit your broadcasts from time to time, to increase chances the recipient @@ -229,15 +229,17 @@ authenticate peers and provide effective (both participants send payload in the very first packet) secure transport with forward secrecy property. -@verbatim -$ nncp-daemon -bind [::]:5400 -@end verbatim +@example +$ nncp-daemon -bind "[::]":5400 +@end example + will start TCP daemon listening on all interfaces for incoming connections. -@verbatim +@example $ nncp-call bob -@end verbatim +@end example + will try to connect to @emph{bob}'s node known TCP addresses (taken from configuration file) and send all related outbound packets and retrieve those the Bob has. 
All interrupted transfers will be automatically diff --git a/ports/nncp/Makefile b/ports/nncp/Makefile index b248e505dc5ca7a4211d9a9e5e83ee008a6114752d6d94bdea8010b9369ea054..dc7db38fad46940aaa536c3412e01e4c510c487576d13bd3265b3432b86ca19f 100644 --- a/ports/nncp/Makefile +++ b/ports/nncp/Makefile @@ -1,7 +1,7 @@ # $FreeBSD: head/net/nncp/Makefile 517819 2019-11-17 11:51:56Z dmgk $ PORTNAME= nncp -DISTVERSION= 5.1.2 +DISTVERSION= 5.2.0 CATEGORIES= net MASTER_SITES= http://www.nncpgo.org/download/ diff --git a/src/call.go b/src/call.go index 694eb90fe73fe655c45fe40f09d5536444482315621e00ab7cd29878d08bfb63..7ae08d3cc6be065205462f584223704be07655e7ffd6c6348b4580470dc31ee2 100644 --- a/src/call.go +++ b/src/call.go @@ -19,7 +19,6 @@ package nncp import ( "net" - "strconv" "github.com/gorhill/cronexpr" ) @@ -77,17 +76,17 @@ ctx.LogI("call-start", sds, "connected") state.Wait() ctx.LogI("call-finish", SDS{ "node": state.Node.Id, - "duration": strconv.FormatInt(int64(state.Duration.Seconds()), 10), - "rxbytes": strconv.FormatInt(state.RxBytes, 10), - "txbytes": strconv.FormatInt(state.TxBytes, 10), - "rxspeed": strconv.FormatInt(state.RxSpeed, 10), - "txspeed": strconv.FormatInt(state.TxSpeed, 10), + "duration": int64(state.Duration.Seconds()), + "rxbytes": state.RxBytes, + "txbytes": state.TxBytes, + "rxspeed": state.RxSpeed, + "txspeed": state.TxSpeed, }, "") isGood = true conn.Close() break } else { - ctx.LogE("call-start", SdsAdd(sds, SDS{"err": err}), "") + ctx.LogE("call-start", sds, err, "") conn.Close() } } diff --git a/src/cfg.go b/src/cfg.go index df1673f9c6eb1bcc8b215910ed2a2dbe853a5f8dd0810a5e3c52e717493ae4ff..51d47e413fb191cded95bbe14cd607fcedf3f208dddc37d5598a150d992f6e89 100644 --- a/src/cfg.go +++ b/src/cfg.go @@ -108,6 +108,8 @@ Spool string `json:"spool"` Log string `json:"log"` Umask string `json:"umask",omitempty` + OmitPrgrs bool `json:"noprogress",omitempty` + Notify *NotifyJSON `json:"notify,omitempty"` Self *NodeOurJSON `json:"self"` @@ -423,10 
+425,15 @@ } rInt := int(r) umaskForce = &rInt } + showPrgrs := true + if cfgJSON.OmitPrgrs { + showPrgrs = false + } ctx := Ctx{ Spool: spoolPath, LogPath: logPath, UmaskForce: umaskForce, + ShowPrgrs: showPrgrs, Self: self, Neigh: make(map[NodeId]*Node, len(cfgJSON.Neigh)), Alias: make(map[string]*NodeId), diff --git a/src/check.go b/src/check.go index 9c871a94c43f84a673c76f731fad8edbf3672b8a5212a03d4139adff2308d9a5..020d0f2aeee458e073a727e6bdf8019a91016f4699a15a3ddb54abd4139010e3 100644 --- a/src/check.go +++ b/src/check.go @@ -20,46 +20,47 @@ import ( "bufio" "bytes" + "errors" "io" "log" "golang.org/x/crypto/blake2b" ) -func Check(src io.Reader, checksum []byte) (bool, error) { +func Check(src io.Reader, checksum []byte, sds SDS, showPrgrs bool) (bool, error) { hsh, err := blake2b.New256(nil) if err != nil { log.Fatalln(err) } - if _, err = io.Copy(hsh, bufio.NewReader(src)); err != nil { + if _, err = CopyProgressed(hsh, bufio.NewReader(src), sds, showPrgrs); err != nil { return false, err } return bytes.Compare(hsh.Sum(nil), checksum) == 0, nil } -func (ctx *Ctx) checkXx(nodeId *NodeId, xx TRxTx) bool { +func (ctx *Ctx) checkXxIsBad(nodeId *NodeId, xx TRxTx) bool { isBad := false for job := range ctx.Jobs(nodeId, xx) { sds := SDS{ - "xx": string(xx), - "node": nodeId, - "pkt": ToBase32(job.HshValue[:]), + "xx": string(xx), + "node": nodeId, + "pkt": ToBase32(job.HshValue[:]), + "fullsize": job.Size, } - ctx.LogP("check", sds, "") - gut, err := Check(job.Fd, job.HshValue[:]) + gut, err := Check(job.Fd, job.HshValue[:], sds, ctx.ShowPrgrs) job.Fd.Close() if err != nil { - ctx.LogE("check", SdsAdd(sds, SDS{"err": err}), "") - return false + ctx.LogE("check", sds, err, "") + return true } if !gut { isBad = true - ctx.LogE("check", sds, "bad") + ctx.LogE("check", sds, errors.New("bad"), "") } } return isBad } func (ctx *Ctx) Check(nodeId *NodeId) bool { - return ctx.checkXx(nodeId, TRx) || ctx.checkXx(nodeId, TTx) + return !(ctx.checkXxIsBad(nodeId, TRx) || 
ctx.checkXxIsBad(nodeId, TTx)) } diff --git a/src/cmd/nncp-bundle/main.go b/src/cmd/nncp-bundle/main.go index 832c8b328e8233736a43c9ce3957de4ccc337cbd448c51ab17f95d2ee802ad35..6be3df8c7d828ec1a4486ccec4f632e5bff1a1a3ea004b46fe29f9d66791f251 100644 --- a/src/cmd/nncp-bundle/main.go +++ b/src/cmd/nncp-bundle/main.go @@ -22,6 +22,7 @@ import ( "archive/tar" "bufio" "bytes" + "errors" "flag" "fmt" "io" @@ -29,7 +30,6 @@ "io/ioutil" "log" "os" "path/filepath" - "strconv" "strings" xdr "github.com/davecgh/go-xdr/xdr2" @@ -63,6 +63,8 @@ dryRun = flag.Bool("dryrun", false, "Do no writes") spoolPath = flag.String("spool", "", "Override path to spool") logPath = flag.String("log", "", "Override path to logfile") quiet = flag.Bool("quiet", false, "Print only errors") + showPrgrs = flag.Bool("progress", false, "Force progress showing") + omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing") debug = flag.Bool("debug", false, "Print debug messages") version = flag.Bool("version", false, "Print version information") warranty = flag.Bool("warranty", false, "Print warranty information") @@ -88,7 +90,15 @@ if !*doRx && !*doTx { log.Fatalln("At least one of -rx and -tx must be specified") } - ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug) + ctx, err := nncp.CtxFromCmdline( + *cfgPath, + *spoolPath, + *logPath, + *quiet, + *showPrgrs, + *omitPrgrs, + *debug, + ) if err != nil { log.Fatalln("Error during initialization:", err) } @@ -142,7 +152,14 @@ Typeflag: tar.TypeReg, }); err != nil { log.Fatalln("Error writing tar header:", err) } - if _, err = io.Copy(tarWr, job.Fd); err != nil { + if _, err = nncp.CopyProgressed( + tarWr, job.Fd, + nncp.SdsAdd(sds, nncp.SDS{ + "pkt": nncp.ToBase32(job.HshValue[:]), + "fullsize": job.Size, + }), + ctx.ShowPrgrs, + ); err != nil { log.Fatalln("Error during copying to tar:", err) } job.Fd.Close() @@ -157,9 +174,7 @@ if err = os.Remove(job.Fd.Name()); err != nil { log.Fatalln("Error during deletion:", 
err) } } - ctx.LogI("nncp-bundle", nncp.SdsAdd(sds, nncp.SDS{ - "size": strconv.FormatInt(job.Size, 10), - }), "") + ctx.LogI("nncp-bundle", nncp.SdsAdd(sds, nncp.SDS{"size": job.Size}), "") } } if err = tarWr.Close(); err != nil { @@ -224,9 +239,13 @@ if entry.Size < nncp.PktEncOverhead { ctx.LogD("nncp-bundle", sds, "Too small packet") continue } + if !ctx.IsEnoughSpace(entry.Size) { + ctx.LogE("nncp-bundle", sds, errors.New("not enough spool space"), "") + continue + } pktName = filepath.Base(entry.Name) if _, err = nncp.FromBase32(pktName); err != nil { - ctx.LogD("nncp-bundle", sds, "Bad packet name") + ctx.LogD("nncp-bundle", nncp.SdsAdd(sds, nncp.SDS{"err": "bad packet name"}), "") continue } if _, err = io.ReadFull(tarR, pktEncBuf); err != nil { @@ -273,7 +292,11 @@ } if _, err = hsh.Write(pktEncBuf); err != nil { log.Fatalln("Error during writing:", err) } - if _, err = io.Copy(hsh, tarR); err != nil { + if _, err = nncp.CopyProgressed( + hsh, tarR, + nncp.SdsAdd(sds, nncp.SDS{"fullsize": entry.Size}), + ctx.ShowPrgrs, + ); err != nil { log.Fatalln("Error during copying:", err) } if nncp.ToBase32(hsh.Sum(nil)) == pktName { @@ -282,7 +305,7 @@ if !*dryRun { os.Remove(dstPath) } } else { - ctx.LogE("nncp-bundle", sds, "bad checksum") + ctx.LogE("nncp-bundle", sds, errors.New("bad checksum"), "") } continue } @@ -298,6 +321,7 @@ } } sds["node"] = nncp.ToBase32(pktEnc.Recipient[:]) sds["pkt"] = pktName + sds["fullsize"] = entry.Size selfPath = filepath.Join(ctx.Spool, ctx.SelfId.String(), string(nncp.TRx)) dstPath = filepath.Join(selfPath, pktName) if _, err = os.Stat(dstPath); err == nil || !os.IsNotExist(err) { @@ -317,11 +341,11 @@ } if _, err = hsh.Write(pktEncBuf); err != nil { log.Fatalln("Error during writing:", err) } - if _, err = io.Copy(hsh, tarR); err != nil { + if _, err = nncp.CopyProgressed(hsh, tarR, sds, ctx.ShowPrgrs); err != nil { log.Fatalln("Error during copying:", err) } if nncp.ToBase32(hsh.Sum(nil)) != pktName { - 
ctx.LogE("nncp-bundle", sds, "bad checksum") + ctx.LogE("nncp-bundle", sds, errors.New("bad checksum"), "") continue } } else { @@ -332,7 +356,7 @@ } if _, err = tmp.W.Write(pktEncBuf); err != nil { log.Fatalln("Error during writing:", err) } - if _, err = io.Copy(tmp.W, tarR); err != nil { + if _, err = nncp.CopyProgressed(tmp.W, tarR, sds, ctx.ShowPrgrs); err != nil { log.Fatalln("Error during copying:", err) } if err = tmp.W.Flush(); err != nil { @@ -343,14 +367,14 @@ if err = tmp.Commit(selfPath); err != nil { log.Fatalln("Error during commiting:", err) } } else { - ctx.LogE("nncp-bundle", sds, "bad checksum") + ctx.LogE("nncp-bundle", sds, errors.New("bad checksum"), "") tmp.Cancel() continue } } } else { if *dryRun { - if _, err = io.Copy(ioutil.Discard, tarR); err != nil { + if _, err = nncp.CopyProgressed(ioutil.Discard, tarR, sds, ctx.ShowPrgrs); err != nil { log.Fatalln("Error during copying:", err) } } else { @@ -362,7 +386,7 @@ bufTmp := bufio.NewWriterSize(tmp, CopyBufSize) if _, err = bufTmp.Write(pktEncBuf); err != nil { log.Fatalln("Error during writing:", err) } - if _, err = io.Copy(bufTmp, tarR); err != nil { + if _, err = nncp.CopyProgressed(bufTmp, tarR, sds, ctx.ShowPrgrs); err != nil { log.Fatalln("Error during copying:", err) } if err = bufTmp.Flush(); err != nil { @@ -384,7 +408,7 @@ } } } ctx.LogI("nncp-bundle", nncp.SdsAdd(sds, nncp.SDS{ - "size": strconv.FormatInt(entry.Size, 10), + "size": sds["fullsize"], }), "") } } diff --git a/src/cmd/nncp-call/main.go b/src/cmd/nncp-call/main.go index 0b0396225f84057a1d6c55f6825bd37da8297e28a0b582e33844d5036361efb0..873e8525395cc9d870b8bbcca23749ef217bd26dbc6f1ab93eae0e99fcaba21e 100644 --- a/src/cmd/nncp-call/main.go +++ b/src/cmd/nncp-call/main.go @@ -49,6 +49,8 @@ txRate = flag.Int("txrate", 0, "Maximal transmit rate, pkts/sec") spoolPath = flag.String("spool", "", "Override path to spool") logPath = flag.String("log", "", "Override path to logfile") quiet = flag.Bool("quiet", false, "Print only 
errors") + showPrgrs = flag.Bool("progress", false, "Force progress showing") + omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing") debug = flag.Bool("debug", false, "Print debug messages") version = flag.Bool("version", false, "Print version information") warranty = flag.Bool("warranty", false, "Print warranty information") @@ -78,7 +80,15 @@ if *rxOnly && *txOnly { log.Fatalln("-rx and -tx can not be set simultaneously") } - ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug) + ctx, err := nncp.CtxFromCmdline( + *cfgPath, + *spoolPath, + *logPath, + *quiet, + *showPrgrs, + *omitPrgrs, + *debug, + ) if err != nil { log.Fatalln("Error during initialization:", err) } diff --git a/src/cmd/nncp-caller/main.go b/src/cmd/nncp-caller/main.go index 4aa579dd6c6a022c67ccf4f1d7261f72bd6a0f6688712f4174f12ce5b20837ee..e635419864ae2e05ab3b3cf9a21776cd8aad513b54a65b89d10caab594212f87 100644 --- a/src/cmd/nncp-caller/main.go +++ b/src/cmd/nncp-caller/main.go @@ -19,11 +19,11 @@ // Croned NNCP TCP daemon caller. 
package main import ( + "errors" "flag" "fmt" "log" "os" - "strconv" "sync" "time" @@ -44,6 +44,8 @@ cfgPath = flag.String("cfg", nncp.DefaultCfgPath, "Path to configuration file") spoolPath = flag.String("spool", "", "Override path to spool") logPath = flag.String("log", "", "Override path to logfile") quiet = flag.Bool("quiet", false, "Print only errors") + showPrgrs = flag.Bool("progress", false, "Force progress showing") + omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing") debug = flag.Bool("debug", false, "Print debug messages") version = flag.Bool("version", false, "Print version information") warranty = flag.Bool("warranty", false, "Print warranty information") @@ -59,7 +61,15 @@ fmt.Println(nncp.VersionGet()) return } - ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug) + ctx, err := nncp.CtxFromCmdline( + *cfgPath, + *spoolPath, + *logPath, + *quiet, + *showPrgrs, + *omitPrgrs, + *debug, + ) if err != nil { log.Fatalln("Error during initialization:", err) } @@ -105,13 +115,13 @@ } } else { addrs = append(addrs, *call.Addr) } - sds := nncp.SDS{"node": node.Id, "callindex": strconv.Itoa(i)} + sds := nncp.SDS{"node": node.Id, "callindex": i} for { n := time.Now() t := call.Cron.Next(n) ctx.LogD("caller", sds, t.String()) if t.IsZero() { - ctx.LogE("caller", sds, "got zero time") + ctx.LogE("caller", sds, errors.New("got zero time"), "") return } time.Sleep(t.Sub(n)) diff --git a/src/cmd/nncp-cfgmin/main.go b/src/cmd/nncp-cfgmin/main.go index 83c4a100bf7a13329e7c04d73d7112cb8f54dd084f15b6c18f4e36e502b15673..ccae089c87b5e91f2bd71020e6a77b39a05684082b1038fd860fa77e72c0b8db 100644 --- a/src/cmd/nncp-cfgmin/main.go +++ b/src/cmd/nncp-cfgmin/main.go @@ -52,7 +52,7 @@ fmt.Println(nncp.VersionGet()) return } - ctx, err := nncp.CtxFromCmdline(*cfgPath, "", "", false, false) + ctx, err := nncp.CtxFromCmdline(*cfgPath, "", "", false, false, false, false) if err != nil { log.Fatalln("Error during initialization:", err) } diff 
--git a/src/cmd/nncp-cfgnew/main.go b/src/cmd/nncp-cfgnew/main.go index 6c4cf7a0df4d74e311658caba6843874a47ef947ac790543774ead66b288301a..5f0653241a6f22a961d54dd872917cd344b6dd3d4c63f424974d6f571f18d7ea 100644 --- a/src/cmd/nncp-cfgnew/main.go +++ b/src/cmd/nncp-cfgnew/main.go @@ -102,6 +102,8 @@ # Path to log file log: %s # Enforce specified umask usage # umask: "022" + # Omit progress showing by default + # noprogress: true # Enable notification email sending # notify: { diff --git a/src/cmd/nncp-check/main.go b/src/cmd/nncp-check/main.go index 82d36f51ae2b16ba36299cba4274b492591e3ce925afb24cc3dfb65b95c8cd52..be8e57e5fdf9fc09a95238f1258c9abe803f61ba43b05e5d9fe6b5a3b49cac31 100644 --- a/src/cmd/nncp-check/main.go +++ b/src/cmd/nncp-check/main.go @@ -41,6 +41,8 @@ nodeRaw = flag.String("node", "", "Process only that node") spoolPath = flag.String("spool", "", "Override path to spool") logPath = flag.String("log", "", "Override path to logfile") quiet = flag.Bool("quiet", false, "Print only errors") + showPrgrs = flag.Bool("progress", false, "Force progress showing") + omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing") debug = flag.Bool("debug", false, "Print debug messages") version = flag.Bool("version", false, "Print version information") warranty = flag.Bool("warranty", false, "Print warranty information") @@ -56,7 +58,15 @@ fmt.Println(nncp.VersionGet()) return } - ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug) + ctx, err := nncp.CtxFromCmdline( + *cfgPath, + *spoolPath, + *logPath, + *quiet, + *showPrgrs, + *omitPrgrs, + *debug, + ) if err != nil { log.Fatalln("Error during initialization:", err) } diff --git a/src/cmd/nncp-daemon/main.go b/src/cmd/nncp-daemon/main.go index 8a30d2c7265708653ef8fed1525d4e2e18f4a596d5d68d3226a56a2008e3f6f2..be505ea72964379b15335c271417b47b623142c9f2af9e4e1fe4a27a347e77cd 100644 --- a/src/cmd/nncp-daemon/main.go +++ b/src/cmd/nncp-daemon/main.go @@ -24,7 +24,6 @@ "fmt" "log" "net" 
"os" - "strconv" "time" "go.cypherpunks.ru/nncp/v5" @@ -73,18 +72,18 @@ ctx.LogI("call-start", nncp.SDS{"node": state.Node.Id}, "connected") state.Wait() ctx.LogI("call-finish", nncp.SDS{ "node": state.Node.Id, - "duration": strconv.FormatInt(int64(state.Duration.Seconds()), 10), - "rxbytes": strconv.FormatInt(state.RxBytes, 10), - "txbytes": strconv.FormatInt(state.TxBytes, 10), - "rxspeed": strconv.FormatInt(state.RxSpeed, 10), - "txspeed": strconv.FormatInt(state.TxSpeed, 10), + "duration": state.Duration.Seconds(), + "rxbytes": state.RxBytes, + "txbytes": state.TxBytes, + "rxspeed": state.RxSpeed, + "txspeed": state.TxSpeed, }, "") } else { nodeId := "unknown" if state.Node != nil { nodeId = state.Node.Id.String() } - ctx.LogE("call-start", nncp.SDS{"node": nodeId, "err": err}, "") + ctx.LogE("call-start", nncp.SDS{"node": nodeId}, err, "") } } @@ -98,6 +97,8 @@ maxConn = flag.Int("maxconn", 128, "Maximal number of simultaneous connections") spoolPath = flag.String("spool", "", "Override path to spool") logPath = flag.String("log", "", "Override path to logfile") quiet = flag.Bool("quiet", false, "Print only errors") + showPrgrs = flag.Bool("progress", false, "Force progress showing") + omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing") debug = flag.Bool("debug", false, "Print debug messages") version = flag.Bool("version", false, "Print version information") warranty = flag.Bool("warranty", false, "Print warranty information") @@ -117,7 +118,15 @@ if err != nil { log.Fatalln(err) } - ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug) + ctx, err := nncp.CtxFromCmdline( + *cfgPath, + *spoolPath, + *logPath, + *quiet, + *showPrgrs, + *omitPrgrs, + *debug, + ) if err != nil { log.Fatalln("Error during initialization:", err) } diff --git a/src/cmd/nncp-exec/main.go b/src/cmd/nncp-exec/main.go index 
f478ed907e81be17e1d7d3eb81e6c979dc2dc7cdb5eccc920f90eb07139c1d4f..00a7c13cff290a5acb521bb6b595b45e36145b2589890896a8a098bfacdd5c99 100644 --- a/src/cmd/nncp-exec/main.go +++ b/src/cmd/nncp-exec/main.go @@ -45,6 +45,8 @@ viaOverride = flag.String("via", "", "Override Via path to destination node") spoolPath = flag.String("spool", "", "Override path to spool") logPath = flag.String("log", "", "Override path to logfile") quiet = flag.Bool("quiet", false, "Print only errors") + showPrgrs = flag.Bool("progress", false, "Force progress showing") + omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing") debug = flag.Bool("debug", false, "Print debug messages") version = flag.Bool("version", false, "Print version information") warranty = flag.Bool("warranty", false, "Print warranty information") @@ -72,7 +74,15 @@ if err != nil { log.Fatalln(err) } - ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug) + ctx, err := nncp.CtxFromCmdline( + *cfgPath, + *spoolPath, + *logPath, + *quiet, + *showPrgrs, + *omitPrgrs, + *debug, + ) if err != nil { log.Fatalln("Error during initialization:", err) } diff --git a/src/cmd/nncp-file/main.go b/src/cmd/nncp-file/main.go index 08bca0e7ab7ad7b4c6cdf038050459a4944949f2512fcbfd0f51170d3ac21f8e..0d30e5cd58477f0b4a78ccebf66303dda18ee4aa119e7908e15bd65db071d441 100644 --- a/src/cmd/nncp-file/main.go +++ b/src/cmd/nncp-file/main.go @@ -51,6 +51,8 @@ viaOverride = flag.String("via", "", "Override Via path to destination node") spoolPath = flag.String("spool", "", "Override path to spool") logPath = flag.String("log", "", "Override path to logfile") quiet = flag.Bool("quiet", false, "Print only errors") + showPrgrs = flag.Bool("progress", false, "Force progress showing") + omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing") debug = flag.Bool("debug", false, "Print debug messages") version = flag.Bool("version", false, "Print version information") warranty = flag.Bool("warranty", false, "Print 
warranty information") @@ -74,7 +76,15 @@ if err != nil { log.Fatalln(err) } - ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug) + ctx, err := nncp.CtxFromCmdline( + *cfgPath, + *spoolPath, + *logPath, + *quiet, + *showPrgrs, + *omitPrgrs, + *debug, + ) if err != nil { log.Fatalln("Error during initialization:", err) } diff --git a/src/cmd/nncp-freq/main.go b/src/cmd/nncp-freq/main.go index 4cc3e7dfd5a7a1066109b9f6d26d7f5f05c57ac362a1044b6059d2a13ebd96d7..e9f2c845ef3d6024c59246d9cfd1ec98398cdac364cada6e2293580b62959cad 100644 --- a/src/cmd/nncp-freq/main.go +++ b/src/cmd/nncp-freq/main.go @@ -46,6 +46,8 @@ viaOverride = flag.String("via", "", "Override Via path to destination node") spoolPath = flag.String("spool", "", "Override path to spool") logPath = flag.String("log", "", "Override path to logfile") quiet = flag.Bool("quiet", false, "Print only errors") + showPrgrs = flag.Bool("progress", false, "Force progress showing") + omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing") debug = flag.Bool("debug", false, "Print debug messages") version = flag.Bool("version", false, "Print version information") warranty = flag.Bool("warranty", false, "Print warranty information") @@ -73,7 +75,15 @@ if err != nil { log.Fatalln(err) } - ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug) + ctx, err := nncp.CtxFromCmdline( + *cfgPath, + *spoolPath, + *logPath, + *quiet, + *showPrgrs, + *omitPrgrs, + *debug, + ) if err != nil { log.Fatalln("Error during initialization:", err) } diff --git a/src/cmd/nncp-log/main.go b/src/cmd/nncp-log/main.go index 2e9fa81fb0ee9ae3d947b2a1877fb4138ca8f4ca2ae90797674027150edcb777..50c568595e1d903d54fe3b8e8b7ebc89d709dc6071a16daae82e85e3e6c1a136 100644 --- a/src/cmd/nncp-log/main.go +++ b/src/cmd/nncp-log/main.go @@ -54,7 +54,7 @@ fmt.Println(nncp.VersionGet()) return } - ctx, err := nncp.CtxFromCmdline(*cfgPath, "", *logPath, false, *debug) + ctx, err := 
nncp.CtxFromCmdline(*cfgPath, "", *logPath, false, false, false, *debug) if err != nil { log.Fatalln("Error during initialization:", err) } diff --git a/src/cmd/nncp-pkt/main.go b/src/cmd/nncp-pkt/main.go index a234ad1785b37301f164b00ea0d3a0213effa9dcf9435961ea1e1946519ad274..5129f785957e5cc9b11e7aeb5771070aa1c2d67c741d91fa7f0ba965db37928c 100644 --- a/src/cmd/nncp-pkt/main.go +++ b/src/cmd/nncp-pkt/main.go @@ -132,7 +132,7 @@ var pktEnc nncp.PktEnc _, err = xdr.Unmarshal(bytes.NewReader(beginning), &pktEnc) if err == nil && pktEnc.Magic == nncp.MagicNNCPEv4 { if *dump { - ctx, err := nncp.CtxFromCmdline(*cfgPath, "", "", false, false) + ctx, err := nncp.CtxFromCmdline(*cfgPath, "", "", false, false, false, false) if err != nil { log.Fatalln("Error during initialization:", err) } diff --git a/src/cmd/nncp-reass/main.go b/src/cmd/nncp-reass/main.go index 515424fa6ab7e5f3609dd26e88323a62b65db43dca65ed5104cf1fa4ff879f05..d486aa5138a1a73a7c581178f897110d802d143d093a692e381fa09d2553d4d4 100644 --- a/src/cmd/nncp-reass/main.go +++ b/src/cmd/nncp-reass/main.go @@ -22,6 +22,7 @@ import ( "bufio" "bytes" "encoding/hex" + "errors" "flag" "fmt" "hash" @@ -57,21 +58,18 @@ log.Fatalln("Can not open file:", err) } var metaPkt nncp.ChunkedMeta if _, err = xdr.Unmarshal(fd, &metaPkt); err != nil { - ctx.LogE("nncp-reass", nncp.SDS{"path": path, "err": err}, "bad meta file") + ctx.LogE("nncp-reass", nncp.SDS{"path": path}, err, "bad meta file") return false } fd.Close() if metaPkt.Magic != nncp.MagicNNCPMv1 { - ctx.LogE("nncp-reass", nncp.SDS{"path": path, "err": nncp.BadMagic}, "") + ctx.LogE("nncp-reass", nncp.SDS{"path": path}, nncp.BadMagic, "") return false } metaName := filepath.Base(path) if !strings.HasSuffix(metaName, nncp.ChunkedSuffixMeta) { - ctx.LogE("nncp-reass", nncp.SDS{ - "path": path, - "err": "invalid filename suffix", - }, "") + ctx.LogE("nncp-reass", nncp.SDS{"path": path}, errors.New("invalid filename suffix"), "") return false } mainName := 
strings.TrimSuffix(metaName, nncp.ChunkedSuffixMeta) @@ -108,10 +106,7 @@ allChunksExist := true for chunkNum, chunkPath := range chunksPaths { fi, err := os.Stat(chunkPath) if err != nil && os.IsNotExist(err) { - ctx.LogI("nncp-reass", nncp.SDS{ - "path": path, - "chunk": strconv.Itoa(chunkNum), - }, "missing") + ctx.LogI("nncp-reass", nncp.SDS{"path": path, "chunk": chunkNum}, "missing") allChunksExist = false continue } @@ -122,10 +117,11 @@ } else { badSize = uint64(fi.Size()) != metaPkt.ChunkSize } if badSize { - ctx.LogE("nncp-reass", nncp.SDS{ - "path": path, - "chunk": strconv.Itoa(chunkNum), - }, "invalid size") + ctx.LogE( + "nncp-reass", + nncp.SDS{"path": path, "chunk": chunkNum}, + errors.New("invalid size"), "", + ) allChunksExist = false } } @@ -139,20 +135,28 @@ for chunkNum, chunkPath := range chunksPaths { fd, err = os.Open(chunkPath) if err != nil { log.Fatalln("Can not open file:", err) + } + fi, err := fd.Stat() + if err != nil { + log.Fatalln("Can not stat file:", err) } hsh, err = blake2b.New256(nil) if err != nil { log.Fatalln(err) } - if _, err = io.Copy(hsh, bufio.NewReader(fd)); err != nil { + if _, err = nncp.CopyProgressed(hsh, bufio.NewReader(fd), nncp.SDS{ + "pkt": chunkPath, + "fullsize": fi.Size(), + }, ctx.ShowPrgrs); err != nil { log.Fatalln(err) } fd.Close() if bytes.Compare(hsh.Sum(nil), metaPkt.Checksums[chunkNum][:]) != 0 { - ctx.LogE("nncp-reass", nncp.SDS{ - "path": path, - "chunk": strconv.Itoa(chunkNum), - }, "checksum is bad") + ctx.LogE( + "nncp-reass", + nncp.SDS{"path": path, "chunk": chunkNum}, + errors.New("checksum is bad"), "", + ) allChecksumsGood = false } } @@ -187,16 +191,20 @@ fd, err = os.Open(chunkPath) if err != nil { log.Fatalln("Can not open file:", err) } - if _, err = io.Copy(dstW, bufio.NewReader(fd)); err != nil { + fi, err := fd.Stat() + if err != nil { + log.Fatalln("Can not stat file:", err) + } + if _, err = nncp.CopyProgressed(dstW, bufio.NewReader(fd), nncp.SDS{ + "pkt": chunkPath, + "fullsize": 
fi.Size(), + }, ctx.ShowPrgrs); err != nil { log.Fatalln(err) } fd.Close() if !keep { if err = os.Remove(chunkPath); err != nil { - ctx.LogE("nncp-reass", nncp.SdsAdd(sds, nncp.SDS{ - "chunk": strconv.Itoa(chunkNum), - "err": err, - }), "") + ctx.LogE("nncp-reass", nncp.SdsAdd(sds, nncp.SDS{"chunk": chunkNum}), err, "") hasErrors = true } } @@ -213,7 +221,7 @@ } ctx.LogD("nncp-reass", sds, "written") if !keep { if err = os.Remove(path); err != nil { - ctx.LogE("nncp-reass", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "") + ctx.LogE("nncp-reass", sds, err, "") hasErrors = true } } @@ -249,13 +257,13 @@ func findMetas(ctx *nncp.Ctx, dirPath string) []string { dir, err := os.Open(dirPath) defer dir.Close() if err != nil { - ctx.LogE("nncp-reass", nncp.SDS{"path": dirPath, "err": err}, "") + ctx.LogE("nncp-reass", nncp.SDS{"path": dirPath}, err, "") return nil } fis, err := dir.Readdir(0) dir.Close() if err != nil { - ctx.LogE("nncp-reass", nncp.SDS{"path": dirPath, "err": err}, "") + ctx.LogE("nncp-reass", nncp.SDS{"path": dirPath}, err, "") return nil } metaPaths := make([]string, 0) @@ -279,6 +287,8 @@ stdout = flag.Bool("stdout", false, "Output reassembled FILE to stdout") spoolPath = flag.String("spool", "", "Override path to spool") logPath = flag.String("log", "", "Override path to logfile") quiet = flag.Bool("quiet", false, "Print only errors") + showPrgrs = flag.Bool("progress", false, "Force progress showing") + omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing") debug = flag.Bool("debug", false, "Print debug messages") version = flag.Bool("version", false, "Print version information") warranty = flag.Bool("warranty", false, "Print warranty information") @@ -294,7 +304,15 @@ fmt.Println(nncp.VersionGet()) return } - ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug) + ctx, err := nncp.CtxFromCmdline( + *cfgPath, + *spoolPath, + *logPath, + *quiet, + *showPrgrs, + *omitPrgrs, + *debug, + ) if err != nil { 
log.Fatalln("Error during initialization:", err) } diff --git a/src/cmd/nncp-rm/main.go b/src/cmd/nncp-rm/main.go index e234aedd3758fa62577729a14adbe5882f9017cb771fea369a2fbb5ff7703a8a..59debf09d1ceb4a34e1741847739a822404215c457acfc72727e97432b9ee4e8 100644 --- a/src/cmd/nncp-rm/main.go +++ b/src/cmd/nncp-rm/main.go @@ -70,23 +70,25 @@ fmt.Println(nncp.VersionGet()) return } - ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, "", *quiet, *debug) + ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, "", *quiet, false, false, *debug) if err != nil { log.Fatalln("Error during initialization:", err) } ctx.Umask() if *doTmp { - err = filepath.Walk(filepath.Join(ctx.Spool, "tmp"), func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if info.IsDir() { - return nil - } - ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "") - return os.Remove(path) - }) + err = filepath.Walk( + filepath.Join(ctx.Spool, "tmp"), + func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } + ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "") + return os.Remove(path) + }) if err != nil { log.Fatalln("Error during walking:", err) } @@ -120,34 +122,36 @@ if err != nil { log.Fatalln("Invalid -node specified:", err) } remove := func(xx nncp.TRxTx) error { - return filepath.Walk(filepath.Join(ctx.Spool, node.Id.String(), string(xx)), func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if info.IsDir() { + return filepath.Walk( + filepath.Join(ctx.Spool, node.Id.String(), string(xx)), + func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } + if *doSeen && strings.HasSuffix(info.Name(), nncp.SeenSuffix) { + ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "") + return os.Remove(path) + } + if *doPart && strings.HasSuffix(info.Name(), nncp.PartSuffix) { + ctx.LogI("nncp-rm", nncp.SDS{"file": 
path}, "") + return os.Remove(path) + } + if *pktRaw != "" && filepath.Base(info.Name()) == *pktRaw { + ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "") + return os.Remove(path) + } + if !*doSeen && + !*doPart && + (*doRx || *doTx) && + ((*doRx && xx == nncp.TRx) || (*doTx && xx == nncp.TTx)) { + ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "") + return os.Remove(path) + } return nil - } - if *doSeen && strings.HasSuffix(info.Name(), nncp.SeenSuffix) { - ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "") - return os.Remove(path) - } - if *doPart && strings.HasSuffix(info.Name(), nncp.PartSuffix) { - ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "") - return os.Remove(path) - } - if *pktRaw != "" && filepath.Base(info.Name()) == *pktRaw { - ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "") - return os.Remove(path) - } - if !*doSeen && - !*doPart && - (*doRx || *doTx) && - ((*doRx && xx == nncp.TRx) || (*doTx && xx == nncp.TTx)) { - ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "") - return os.Remove(path) - } - return nil - }) + }) } if *pktRaw != "" || *doRx || *doSeen || *doPart { if err = remove(nncp.TRx); err != nil { diff --git a/src/cmd/nncp-stat/main.go b/src/cmd/nncp-stat/main.go index d0c2ec5dbf1c0e01c43761eb142e9a78f4e56b1b8674392076de29d62c99b7b1..2e05348e0a7476825be7e5ca0d037fc53185aa46cdb6d984983fa45770a271f8 100644 --- a/src/cmd/nncp-stat/main.go +++ b/src/cmd/nncp-stat/main.go @@ -56,7 +56,7 @@ fmt.Println(nncp.VersionGet()) return } - ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, "", false, *debug) + ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, "", false, false, false, *debug) if err != nil { log.Fatalln("Error during initialization:", err) } diff --git a/src/cmd/nncp-toss/main.go b/src/cmd/nncp-toss/main.go index 975f371aad2928a9aaabd52e1979f13304769774747aa5ae107181c26bb6940a..3f5eac3060ee5985d38dbac7361158b051e08feb2215196383d89470402e86bc 100644 --- a/src/cmd/nncp-toss/main.go +++ b/src/cmd/nncp-toss/main.go @@ -50,6 +50,8 @@ noTrns = 
flag.Bool("notrns", false, "Do not process packets with type: transitional") spoolPath = flag.String("spool", "", "Override path to spool") logPath = flag.String("log", "", "Override path to logfile") quiet = flag.Bool("quiet", false, "Print only errors") + showPrgrs = flag.Bool("progress", false, "Force progress showing") + omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing") debug = flag.Bool("debug", false, "Print debug messages") version = flag.Bool("version", false, "Print version information") warranty = flag.Bool("warranty", false, "Print warranty information") @@ -69,7 +71,15 @@ if err != nil { log.Fatalln(err) } - ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug) + ctx, err := nncp.CtxFromCmdline( + *cfgPath, + *spoolPath, + *logPath, + *quiet, + *showPrgrs, + *omitPrgrs, + *debug, + ) if err != nil { log.Fatalln("Error during initialization:", err) } diff --git a/src/cmd/nncp-xfer/main.go b/src/cmd/nncp-xfer/main.go index 1858ebff9e587d4d8708fead86df93b319df8267fa3497737ae6953f9cf8459c..a7edef1501dd95b6e78239a53b9ab67f6e2232e40b6809dacea9a812f8c15925 100644 --- a/src/cmd/nncp-xfer/main.go +++ b/src/cmd/nncp-xfer/main.go @@ -20,13 +20,13 @@ package main import ( "bufio" + "errors" "flag" "fmt" "io" "log" "os" "path/filepath" - "strconv" xdr "github.com/davecgh/go-xdr/xdr2" "go.cypherpunks.ru/nncp/v5" @@ -51,6 +51,8 @@ keep = flag.Bool("keep", false, "Do not delete transferred packets") spoolPath = flag.String("spool", "", "Override path to spool") logPath = flag.String("log", "", "Override path to logfile") quiet = flag.Bool("quiet", false, "Print only errors") + showPrgrs = flag.Bool("progress", false, "Force progress showing") + omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing") debug = flag.Bool("debug", false, "Print debug messages") version = flag.Bool("version", false, "Print version information") warranty = flag.Bool("warranty", false, "Print warranty information") @@ -77,7 +79,15 @@ if 
*rxOnly && *txOnly { log.Fatalln("-rx and -tx can not be set simultaneously") } - ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug) + ctx, err := nncp.CtxFromCmdline( + *cfgPath, + *spoolPath, + *logPath, + *quiet, + *showPrgrs, + *omitPrgrs, + *debug, + ) if err != nil { log.Fatalln("Error during initialization:", err) } @@ -107,20 +117,20 @@ if os.IsNotExist(err) { ctx.LogD("nncp-xfer", sds, "no dir") goto Tx } - ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "stat") + ctx.LogE("nncp-xfer", sds, err, "stat") isBad = true goto Tx } dir, err = os.Open(selfPath) if err != nil { - ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "open") + ctx.LogE("nncp-xfer", sds, err, "open") isBad = true goto Tx } fis, err = dir.Readdir(0) dir.Close() if err != nil { - ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "read") + ctx.LogE("nncp-xfer", sds, err, "read") isBad = true goto Tx } @@ -144,14 +154,14 @@ continue } dir, err = os.Open(filepath.Join(selfPath, fi.Name())) if err != nil { - ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "open") + ctx.LogE("nncp-xfer", sds, err, "open") isBad = true continue } fisInt, err := dir.Readdir(0) dir.Close() if err != nil { - ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "read") + ctx.LogE("nncp-xfer", sds, err, "read") isBad = true continue } @@ -159,12 +169,16 @@ for _, fiInt := range fisInt { if !fi.IsDir() { continue } + // Check that it is valid Base32 encoding + if _, err = nncp.NodeIdFromString(fiInt.Name()); err != nil { + continue + } filename := filepath.Join(dir.Name(), fiInt.Name()) sds["file"] = filename delete(sds, "size") fd, err := os.Open(filename) if err != nil { - ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "open") + ctx.LogE("nncp-xfer", sds, err, "open") isBad = true continue } @@ -180,9 +194,9 @@ ctx.LogD("nncp-xfer", sds, "too nice") fd.Close() continue } - sds["size"] = 
strconv.FormatInt(fiInt.Size(), 10) + sds["size"] = fiInt.Size() if !ctx.IsEnoughSpace(fiInt.Size()) { - ctx.LogE("nncp-xfer", sds, "is not enough space") + ctx.LogE("nncp-xfer", sds, errors.New("is not enough space"), "") fd.Close() continue } @@ -191,14 +205,28 @@ tmp, err := ctx.NewTmpFileWHash() if err != nil { log.Fatalln(err) } - if _, err = io.CopyN(tmp.W, bufio.NewReader(fd), fiInt.Size()); err != nil { - ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "copy") + r, w := io.Pipe() + go func() { + _, err := io.CopyN(w, bufio.NewReader(fd), fiInt.Size()) + if err == nil { + w.Close() + return + } + ctx.LogE("nncp-xfer", sds, err, "copy") + w.CloseWithError(err) + }() + if _, err = nncp.CopyProgressed(tmp.W, r, nncp.SdsAdd(sds, nncp.SDS{ + "pkt": filename, + "fullsize": sds["size"], + }), ctx.ShowPrgrs); err != nil { + ctx.LogE("nncp-xfer", sds, err, "copy") isBad = true - fd.Close() + } + fd.Close() + if isBad { tmp.Cancel() continue } - fd.Close() if err = tmp.Commit(filepath.Join( ctx.Spool, nodeId.String(), @@ -209,7 +237,7 @@ } ctx.LogI("nncp-xfer", sds, "") if !*keep { if err = os.Remove(filename); err != nil { - ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "remove") + ctx.LogE("nncp-xfer", sds, err, "remove") isBad = true } } @@ -246,13 +274,13 @@ continue } if err = os.Mkdir(nodePath, os.FileMode(0777)); err != nil { ctx.UnlockDir(dirLock) - ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "mkdir") + ctx.LogE("nncp-xfer", sds, err, "mkdir") isBad = true continue } } else { ctx.UnlockDir(dirLock) - ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "stat") + ctx.LogE("nncp-xfer", sds, err, "stat") isBad = true continue } @@ -264,13 +292,13 @@ if err != nil { if os.IsNotExist(err) { if err = os.Mkdir(dstPath, os.FileMode(0777)); err != nil { ctx.UnlockDir(dirLock) - ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "mkdir") + ctx.LogE("nncp-xfer", sds, err, "mkdir") isBad = true continue } 
} else { ctx.UnlockDir(dirLock) - ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "stat") + ctx.LogE("nncp-xfer", sds, err, "stat") isBad = true continue } @@ -296,7 +324,7 @@ continue } tmp, err := nncp.TempFile(dstPath, "xfer") if err != nil { - ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "mktemp") + ctx.LogE("nncp-xfer", sds, err, "mktemp") job.Fd.Close() isBad = true break @@ -304,45 +332,48 @@ } sds["tmp"] = tmp.Name() ctx.LogD("nncp-xfer", sds, "created") bufW := bufio.NewWriter(tmp) - copied, err := io.Copy(bufW, bufio.NewReader(job.Fd)) + copied, err := nncp.CopyProgressed( + bufW, + bufio.NewReader(job.Fd), + nncp.SdsAdd(sds, nncp.SDS{"fullsize": job.Size}), + ctx.ShowPrgrs, + ) job.Fd.Close() if err != nil { - ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "copy") + ctx.LogE("nncp-xfer", sds, err, "copy") tmp.Close() isBad = true continue } if err = bufW.Flush(); err != nil { tmp.Close() - ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "flush") + ctx.LogE("nncp-xfer", sds, err, "flush") isBad = true continue } if err = tmp.Sync(); err != nil { tmp.Close() - ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "sync") + ctx.LogE("nncp-xfer", sds, err, "sync") isBad = true continue } tmp.Close() if err = os.Rename(tmp.Name(), filepath.Join(dstPath, pktName)); err != nil { - ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "rename") + ctx.LogE("nncp-xfer", sds, err, "rename") isBad = true continue } if err = nncp.DirSync(dstPath); err != nil { - ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "sync") + ctx.LogE("nncp-xfer", sds, err, "sync") isBad = true continue } os.Remove(filepath.Join(dstPath, pktName+".part")) delete(sds, "tmp") - ctx.LogI("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{ - "size": strconv.FormatInt(copied, 10), - }), "") + ctx.LogI("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"size": copied}), "") if !*keep { if err = os.Remove(job.Fd.Name()); err != nil 
{ - ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "remove") + ctx.LogE("nncp-xfer", sds, err, "remove") isBad = true } } diff --git a/src/ctx.go b/src/ctx.go index 24eea3d848415224c3d1a46b538fc72b089ba551423a95600957bbceed37dd72..256baba51eba94ddca44f4dfe5186cc3d36f8bae3f868d14829019e63ffda0c4 100644 --- a/src/ctx.go +++ b/src/ctx.go @@ -39,6 +39,7 @@ Spool string LogPath string UmaskForce *int Quiet bool + ShowPrgrs bool Debug bool NotifyFile *FromToJSON NotifyFreq *FromToJSON @@ -64,22 +65,30 @@ func (ctx *Ctx) ensureRxDir(nodeId *NodeId) error { dirPath := filepath.Join(ctx.Spool, nodeId.String(), string(TRx)) if err := os.MkdirAll(dirPath, os.FileMode(0777)); err != nil { - ctx.LogE("dir-ensure", SDS{"dir": dirPath, "err": err}, "") + ctx.LogE("dir-ensure", SDS{"dir": dirPath}, err, "") return err } fd, err := os.Open(dirPath) if err != nil { - ctx.LogE("dir-ensure", SDS{"dir": dirPath, "err": err}, "") + ctx.LogE("dir-ensure", SDS{"dir": dirPath}, err, "") return err } fd.Close() return nil } -func CtxFromCmdline(cfgPath, spoolPath, logPath string, quiet, debug bool) (*Ctx, error) { +func CtxFromCmdline( + cfgPath, + spoolPath, + logPath string, + quiet, showPrgrs, omitPrgrs, debug bool, +) (*Ctx, error) { env := os.Getenv(CfgPathEnv) if env != "" { cfgPath = env + } + if showPrgrs && omitPrgrs { + return nil, errors.New("simultaneous -progress and -noprogress") } cfgRaw, err := ioutil.ReadFile(cfgPath) if err != nil { @@ -104,6 +113,12 @@ ctx.LogPath = env } } else { ctx.LogPath = logPath + } + if showPrgrs { + ctx.ShowPrgrs = true + } + if quiet || omitPrgrs { + ctx.ShowPrgrs = false } ctx.Quiet = quiet ctx.Debug = debug diff --git a/src/humanizer.go b/src/humanizer.go index 7dc2778a8ec53c33d5970dc64c13a77a6984368250aabf51d878752902df5c1f..fcbd9e7acb364d3e5723993c31c9ba7cd7c5964ed401b146081cd31f1bbbe191 100644 --- a/src/humanizer.go +++ b/src/humanizer.go @@ -227,7 +227,7 @@ return s } msg = fmt.Sprintf( "Packet %s (%s) (nice %s)", - 
sds["hash"], + sds["pkt"], size, NicenessFmt(nice), ) @@ -254,7 +254,7 @@ return s } msg += fmt.Sprintf("%s packets, %s", sds["pkts"], size) case "sp-process": - msg = fmt.Sprintf("%s has %s (%s): %s", nodeS, sds["hash"], size, rem) + msg = fmt.Sprintf("%s has %s (%s): %s", nodeS, sds["pkt"], size, rem) case "sp-file": switch sds["xx"] { case "rx": @@ -274,7 +274,7 @@ return s } msg += fmt.Sprintf( "%s %d%% (%s / %s)", - sds["hash"], + sds["pkt"], 100*sizeParsed/fullsize, humanize.IBytes(uint64(sizeParsed)), humanize.IBytes(uint64(fullsize)), @@ -282,9 +282,9 @@ ) case "sp-done": switch sds["xx"] { case "rx": - msg = fmt.Sprintf("Packet %s is retreived (%s)", sds["hash"], size) + msg = fmt.Sprintf("Packet %s is retreived (%s)", sds["pkt"], size) case "tx": - msg = fmt.Sprintf("Packet %s is sent", sds["hash"]) + msg = fmt.Sprintf("Packet %s is sent", sds["pkt"]) default: return s } diff --git a/src/jobs.go b/src/jobs.go index e5bb4cbd66e96d5b6011284d734bc50c1b2f96c8fc96beef7a2001f601010359..22de9149ffd36bb849a9e834d10f0e9f0a49316056f6474a952fdc0c79e2942c 100644 --- a/src/jobs.go +++ b/src/jobs.go @@ -21,7 +21,6 @@ import ( "io" "os" "path/filepath" - "strconv" xdr "github.com/davecgh/go-xdr/xdr2" ) @@ -73,8 +72,8 @@ ctx.LogD("jobs", SDS{ "xx": string(xx), "node": pktEnc.Sender, "name": fi.Name(), - "nice": strconv.Itoa(int(pktEnc.Nice)), - "size": strconv.FormatInt(fi.Size(), 10), + "nice": int(pktEnc.Nice), + "size": fi.Size(), }, "taken") job := Job{ PktEnc: &pktEnc, diff --git a/src/lockdir.go b/src/lockdir.go index 4a73bc9ab8e3bc4122c7d2ee3f890f2e85d827291f3569127eb8762499f90401..2911217049d4f918c5b05a685933154fd35e18099a717f48e7ab3ced2e1020a0 100644 --- a/src/lockdir.go +++ b/src/lockdir.go @@ -33,12 +33,12 @@ os.O_CREATE|os.O_WRONLY, os.FileMode(0666), ) if err != nil { - ctx.LogE("lockdir", SDS{"path": lockPath, "err": err}, "") + ctx.LogE("lockdir", SDS{"path": lockPath}, err, "") return nil, err } err = unix.Flock(int(dirLock.Fd()), 
unix.LOCK_EX|unix.LOCK_NB) if err != nil { - ctx.LogE("lockdir", SDS{"path": lockPath, "err": err}, "") + ctx.LogE("lockdir", SDS{"path": lockPath}, err, "") dirLock.Close() return nil, err } diff --git a/src/log.go b/src/log.go index f8ea1bf2ccd4a2b0cb75ea1806b00c1fed955be2be7d4e04288e921e068f5a54..77875d24b0685a80b907bb83416144a4154d166aafdd547fea34997b26331a84 100644 --- a/src/log.go +++ b/src/log.go @@ -40,7 +40,14 @@ sort.Strings(keys) result := make([]string, 0, 1+len(keys)) result = append(result, "["+who) for _, k := range keys { - result = append(result, fmt.Sprintf(`%s="%s"`, k, sds[k])) + var value string + switch v := sds[k].(type) { + case int, int8, uint8, int64, uint64: + value = fmt.Sprintf("%d", v) + default: + value = fmt.Sprintf("%s", v) + } + result = append(result, fmt.Sprintf(`%s="%s"`, k, value)) } return strings.Join(result, " ") + "]" } @@ -103,13 +110,8 @@ } ctx.Log(msg) } -func (ctx *Ctx) LogP(who string, sds SDS, msg string) { - if !ctx.Quiet { - fmt.Fprintln(os.Stderr, ctx.Humanize(msgFmt(LogLevel("P"), who, sds, msg))) - } -} - -func (ctx *Ctx) LogE(who string, sds SDS, msg string) { +func (ctx *Ctx) LogE(who string, sds SDS, err error, msg string) { + sds["err"] = err.Error() msg = msgFmt(LogLevel("E"), who, sds, msg) if len(msg) > 2048 { msg = msg[:2048] diff --git a/src/progress.go b/src/progress.go new file mode 100644 index 0000000000000000000000000000000000000000..bc79635134647b085e8f0e8b957355a59a989219459b56340a1f38387e98c5af --- /dev/null +++ b/src/progress.go @@ -0,0 +1,151 @@ +/* +NNCP -- Node to Node copy, utilities for store-and-forward data exchange +Copyright (C) 2016-2019 Sergey Matveev + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, version 3 of the License. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . +*/ + +package nncp + +import ( + "fmt" + "io" + "os" + "strings" + "sync" + "time" + + "github.com/dustin/go-humanize" + "go.cypherpunks.ru/nncp/v5/uilive" +) + +func init() { + uilive.Out = os.Stderr +} + +var progressBars = make(map[string]*ProgressBar) +var progressBarsLock sync.RWMutex + +type ProgressBar struct { + w *uilive.Writer + hash string + started time.Time + initial int64 + full int64 +} + +func ProgressBarNew(initial, full int64) *ProgressBar { + pb := ProgressBar{ + w: uilive.New(), + started: time.Now(), + initial: initial, + full: full, + } + pb.w.Start() + return &pb +} + +func (pb ProgressBar) Render(what string, size int64) { + now := time.Now().UTC() + timeDiff := now.Sub(pb.started).Seconds() + if timeDiff == 0 { + timeDiff = 1 + } + percentage := int64(100) + if pb.full > 0 { + percentage = 100 * size / pb.full + } + fmt.Fprintf( + pb.w, "%s %s %s/%s %d%% (%s/sec)\n", + now.Format(time.RFC3339), what, + humanize.IBytes(uint64(size)), + humanize.IBytes(uint64(pb.full)), + percentage, + humanize.IBytes(uint64(float64(size-pb.initial)/timeDiff)), + ) +} + +func (pb ProgressBar) Kill() { + pb.w.Stop() +} + +func CopyProgressed( + dst io.Writer, + src io.Reader, + sds SDS, + showPrgrs bool, +) (written int64, err error) { + buf := make([]byte, EncBlkSize) + var nr, nw int + var er, ew error + for { + nr, er = src.Read(buf) + if nr > 0 { + nw, ew = dst.Write(buf[:nr]) + if nw > 0 { + written += int64(nw) + if showPrgrs { + sds["size"] = written + Progress(sds) + } + } + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + } + if er != nil { + if er != io.EOF { + err 
= er + } + break + } + } + return +} + +func Progress(sds SDS) { + pkt := sds["pkt"].(string) + var size int64 + if sizeI, exists := sds["size"]; exists { + size = sizeI.(int64) + } + fullsize := sds["fullsize"].(int64) + progressBarsLock.RLock() + pb, exists := progressBars[pkt] + progressBarsLock.RUnlock() + if !exists { + progressBarsLock.Lock() + pb = ProgressBarNew(size, fullsize) + progressBars[pkt] = pb + progressBarsLock.Unlock() + } + what := pkt + if len(what) >= 52 { // Base32 encoded + what = what[:16] + ".." + what[len(what)-16:] + } + if xx, exists := sds["xx"]; exists { + what = strings.Title(xx.(string)) + " " + what + } + pb.Render(what, size) + if size >= fullsize { + pb.Kill() + progressBarsLock.Lock() + delete(progressBars, pkt) + progressBarsLock.Unlock() + } +} diff --git a/src/sp.go b/src/sp.go index 51a1fafbc37b7f9ba25b77681d92f0ecc99ccc3b305c240bf4f72f265a253558..8275f553ab78899c63373389a5ee7b31c851cb49bde89c66593a667671b39f7b 100644 --- a/src/sp.go +++ b/src/sp.go @@ -26,7 +26,6 @@ "net" "os" "path/filepath" "sort" - "strconv" "sync" "time" @@ -271,15 +270,15 @@ payloads = append(payloads, MarshalSP(SPTypeInfo, info)) ctx.LogD("sp-info-our", SDS{ "node": nodeId, "name": ToBase32(info.Hash[:]), - "size": strconv.FormatInt(int64(info.Size), 10), + "size": info.Size, }, "") } if totalSize > 0 { ctx.LogI("sp-infos", SDS{ "xx": string(TTx), "node": nodeId, - "pkts": strconv.Itoa(len(payloads)), - "size": strconv.FormatInt(totalSize, 10), + "pkts": len(payloads), + "size": totalSize, }, "") } return payloadsSplit(payloads) @@ -348,31 +347,31 @@ if err != nil { state.dirUnlock() return err } - sds := SDS{"node": nodeId, "nice": strconv.Itoa(int(state.Nice))} + sds := SDS{"node": nodeId, "nice": int(state.Nice)} state.Ctx.LogD("sp-start", sds, "sending first message") conn.SetWriteDeadline(time.Now().Add(DefaultDeadline * time.Second)) if err = state.WriteSP(conn, buf); err != nil { - state.Ctx.LogE("sp-start", SdsAdd(sds, SDS{"err": err}), "") + 
state.Ctx.LogE("sp-start", sds, err, "") state.dirUnlock() return err } state.Ctx.LogD("sp-start", sds, "waiting for first message") conn.SetReadDeadline(time.Now().Add(DefaultDeadline * time.Second)) if buf, err = state.ReadSP(conn); err != nil { - state.Ctx.LogE("sp-start", SdsAdd(sds, SDS{"err": err}), "") + state.Ctx.LogE("sp-start", sds, err, "") state.dirUnlock() return err } payload, state.csOur, state.csTheir, err = state.hs.ReadMessage(nil, buf) if err != nil { - state.Ctx.LogE("sp-start", SdsAdd(sds, SDS{"err": err}), "") + state.Ctx.LogE("sp-start", sds, err, "") state.dirUnlock() return err } state.Ctx.LogD("sp-start", sds, "starting workers") err = state.StartWorkers(conn, infosPayloads, payload) if err != nil { - state.Ctx.LogE("sp-start", SdsAdd(sds, SDS{"err": err}), "") + state.Ctx.LogE("sp-start", sds, err, "") state.dirUnlock() return err } @@ -403,18 +402,14 @@ state.started = started state.xxOnly = xxOnly var buf []byte var payload []byte - state.Ctx.LogD( - "sp-start", - SDS{"nice": strconv.Itoa(int(state.Nice))}, - "waiting for first message", - ) + state.Ctx.LogD("sp-start", SDS{"nice": int(state.Nice)}, "waiting for first message") conn.SetReadDeadline(time.Now().Add(DefaultDeadline * time.Second)) if buf, err = state.ReadSP(conn); err != nil { - state.Ctx.LogE("sp-start", SDS{"err": err}, "") + state.Ctx.LogE("sp-start", SDS{}, err, "") return err } if payload, _, _, err = state.hs.ReadMessage(nil, buf); err != nil { - state.Ctx.LogE("sp-start", SDS{"err": err}, "") + state.Ctx.LogE("sp-start", SDS{}, err, "") return err } @@ -427,7 +422,7 @@ } } if node == nil { peerId := ToBase32(state.hs.PeerStatic()) - state.Ctx.LogE("sp-start", SDS{"peer": peerId}, "unknown") + state.Ctx.LogE("sp-start", SDS{"peer": peerId}, errors.New("unknown"), "") return errors.New("Unknown peer: " + peerId) } state.Node = node @@ -435,7 +430,7 @@ state.rxRate = node.RxRate state.txRate = node.TxRate state.onlineDeadline = node.OnlineDeadline state.maxOnlineTime = 
node.MaxOnlineTime - sds := SDS{"node": node.Id, "nice": strconv.Itoa(int(state.Nice))} + sds := SDS{"node": node.Id, "nice": int(state.Nice)} if state.Ctx.ensureRxDir(node.Id); err != nil { return err @@ -478,7 +473,7 @@ return err } conn.SetWriteDeadline(time.Now().Add(DefaultDeadline * time.Second)) if err = state.WriteSP(conn, buf); err != nil { - state.Ctx.LogE("sp-start", SdsAdd(sds, SDS{"err": err}), "") + state.Ctx.LogE("sp-start", sds, err, "") state.dirUnlock() return err } @@ -495,13 +490,13 @@ func (state *SPState) StartWorkers( conn ConnDeadlined, infosPayloads [][]byte, payload []byte) error { - sds := SDS{"node": state.Node.Id, "nice": strconv.Itoa(int(state.Nice))} + sds := SDS{"node": state.Node.Id, "nice": int(state.Nice)} if len(infosPayloads) > 1 { go func() { for _, payload := range infosPayloads[1:] { state.Ctx.LogD( "sp-work", - SdsAdd(sds, SDS{"size": strconv.Itoa(len(payload))}), + SdsAdd(sds, SDS{"size": len(payload)}), "queuing remaining payload", ) state.payloads <- payload @@ -510,12 +505,12 @@ }() } state.Ctx.LogD( "sp-work", - SdsAdd(sds, SDS{"size": strconv.Itoa(len(payload))}), + SdsAdd(sds, SDS{"size": len(payload)}), "processing first payload", ) replies, err := state.ProcessSP(payload) if err != nil { - state.Ctx.LogE("sp-work", SdsAdd(sds, SDS{"err": err}), "") + state.Ctx.LogE("sp-work", sds, err, "") return err } @@ -523,7 +518,7 @@ go func() { for _, reply := range replies { state.Ctx.LogD( "sp-work", - SdsAdd(sds, SDS{"size": strconv.Itoa(len(reply))}), + SdsAdd(sds, SDS{"size": len(reply)}), "queuing reply", ) state.payloads <- reply @@ -543,7 +538,7 @@ &state.infosOurSeen, ) { state.Ctx.LogD( "sp-work", - SdsAdd(sds, SDS{"size": strconv.Itoa(len(payload))}), + SdsAdd(sds, SDS{"size": len(payload)}), "queuing new info", ) state.payloads <- payload @@ -567,7 +562,7 @@ select { case payload = <-state.payloads: state.Ctx.LogD( "sp-xmit", - SdsAdd(sds, SDS{"size": strconv.Itoa(len(payload))}), + SdsAdd(sds, SDS{"size": 
len(payload)}), "got payload", ) default: @@ -589,8 +584,8 @@ } sdsp := SdsAdd(sds, SDS{ "xx": string(TTx), - "hash": ToBase32(freq.Hash[:]), - "size": strconv.FormatInt(int64(freq.Offset), 10), + "pkt": ToBase32(freq.Hash[:]), + "size": int64(freq.Offset), }) state.Ctx.LogD("sp-file", sdsp, "queueing") fd, err := os.Open(filepath.Join( @@ -600,32 +595,32 @@ string(TTx), ToBase32(freq.Hash[:]), )) if err != nil { - state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "") + state.Ctx.LogE("sp-file", sdsp, err, "") break } fi, err := fd.Stat() if err != nil { - state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "") + state.Ctx.LogE("sp-file", sdsp, err, "") break } - fullSize := uint64(fi.Size()) + fullSize := fi.Size() var buf []byte - if freq.Offset < fullSize { + if freq.Offset < uint64(fullSize) { state.Ctx.LogD("sp-file", sdsp, "seeking") if _, err = fd.Seek(int64(freq.Offset), io.SeekStart); err != nil { - state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "") + state.Ctx.LogE("sp-file", sdsp, err, "") break } buf = make([]byte, MaxSPSize-SPHeadOverhead-SPFileOverhead) n, err := fd.Read(buf) if err != nil { - state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "") + state.Ctx.LogE("sp-file", sdsp, err, "") break } buf = buf[:n] state.Ctx.LogD( "sp-file", - SdsAdd(sdsp, SDS{"size": strconv.Itoa(n)}), + SdsAdd(sdsp, SDS{"size": n}), "read", ) } @@ -636,12 +631,14 @@ Offset: freq.Offset, Payload: buf, }) ourSize := freq.Offset + uint64(len(buf)) - sdsp["size"] = strconv.FormatInt(int64(ourSize), 10) - sdsp["fullsize"] = strconv.FormatInt(int64(fullSize), 10) - state.Ctx.LogP("sp-file", sdsp, "") + sdsp["size"] = int64(ourSize) + sdsp["fullsize"] = fullSize + if state.Ctx.ShowPrgrs { + Progress(sdsp) + } state.Lock() if len(state.queueTheir) > 0 && *state.queueTheir[0].freq.Hash == *freq.Hash { - if ourSize == fullSize { + if ourSize == uint64(fullSize) { state.Ctx.LogD("sp-file", sdsp, "finished") if len(state.queueTheir) > 1 { 
state.queueTheir = state.queueTheir[1:] @@ -658,12 +655,12 @@ state.Unlock() } state.Ctx.LogD( "sp-xmit", - SdsAdd(sds, SDS{"size": strconv.Itoa(len(payload))}), + SdsAdd(sds, SDS{"size": len(payload)}), "sending", ) conn.SetWriteDeadline(time.Now().Add(DefaultDeadline * time.Second)) if err := state.WriteSP(conn, state.csOur.Encrypt(nil, nil, payload)); err != nil { - state.Ctx.LogE("sp-xmit", SdsAdd(sds, SDS{"err": err}), "") + state.Ctx.LogE("sp-xmit", sds, err, "") break } } @@ -691,34 +688,34 @@ } if unmarshalErr.ErrorCode == xdr.ErrIO { break } - state.Ctx.LogE("sp-recv", SdsAdd(sds, SDS{"err": err}), "") + state.Ctx.LogE("sp-recv", sds, err, "") break } state.Ctx.LogD( "sp-recv", - SdsAdd(sds, SDS{"size": strconv.Itoa(len(payload))}), + SdsAdd(sds, SDS{"size": len(payload)}), "got payload", ) payload, err = state.csTheir.Decrypt(nil, nil, payload) if err != nil { - state.Ctx.LogE("sp-recv", SdsAdd(sds, SDS{"err": err}), "") + state.Ctx.LogE("sp-recv", sds, err, "") break } state.Ctx.LogD( "sp-recv", - SdsAdd(sds, SDS{"size": strconv.Itoa(len(payload))}), + SdsAdd(sds, SDS{"size": len(payload)}), "processing", ) replies, err := state.ProcessSP(payload) if err != nil { - state.Ctx.LogE("sp-recv", SdsAdd(sds, SDS{"err": err}), "") + state.Ctx.LogE("sp-recv", sds, err, "") break } go func() { for _, reply := range replies { state.Ctx.LogD( "sp-recv", - SdsAdd(sds, SDS{"size": strconv.Itoa(len(reply))}), + SdsAdd(sds, SDS{"size": len(reply)}), "queuing reply", ) state.payloads <- reply @@ -750,7 +747,7 @@ } } func (state *SPState) ProcessSP(payload []byte) ([][]byte, error) { - sds := SDS{"node": state.Node.Id, "nice": strconv.Itoa(int(state.Nice))} + sds := SDS{"node": state.Node.Id, "nice": int(state.Nice)} r := bytes.NewReader(payload) var err error var replies [][]byte @@ -759,7 +756,7 @@ for r.Len() > 0 { state.Ctx.LogD("sp-process", sds, "unmarshaling header") var head SPHead if _, err = xdr.Unmarshal(r, &head); err != nil { - state.Ctx.LogE("sp-process", 
SdsAdd(sds, SDS{"err": err}), "") + state.Ctx.LogE("sp-process", sds, err, "") return nil, err } switch head.Type { @@ -769,13 +766,13 @@ sdsp := SdsAdd(sds, SDS{"type": "info"}) state.Ctx.LogD("sp-process", sdsp, "unmarshaling packet") var info SPInfo if _, err = xdr.Unmarshal(r, &info); err != nil { - state.Ctx.LogE("sp-process", SdsAdd(sdsp, SDS{"err": err}), "") + state.Ctx.LogE("sp-process", sdsp, err, "") return nil, err } sdsp = SdsAdd(sds, SDS{ - "hash": ToBase32(info.Hash[:]), - "size": strconv.FormatInt(int64(info.Size), 10), - "nice": strconv.Itoa(int(info.Nice)), + "pkt": ToBase32(info.Hash[:]), + "size": int64(info.Size), + "nice": int(info.Nice), }) if !state.listOnly && info.Nice > state.Nice { state.Ctx.LogD("sp-process", sdsp, "too nice") @@ -820,7 +817,7 @@ continue } state.Ctx.LogI( "sp-info", - SdsAdd(sdsp, SDS{"offset": strconv.FormatInt(offset, 10)}), + SdsAdd(sdsp, SDS{"offset": offset}), "", ) if !state.listOnly && (state.onlyPkts == nil || state.onlyPkts[*info.Hash]) { @@ -834,15 +831,12 @@ sdsp := SdsAdd(sds, SDS{"type": "file"}) state.Ctx.LogD("sp-process", sdsp, "unmarshaling packet") var file SPFile if _, err = xdr.Unmarshal(r, &file); err != nil { - state.Ctx.LogE("sp-process", SdsAdd(sds, SDS{ - "err": err, - "type": "file", - }), "") + state.Ctx.LogE("sp-process", SdsAdd(sds, SDS{"type": "file"}), err, "") return nil, err } sdsp["xx"] = string(TRx) - sdsp["hash"] = ToBase32(file.Hash[:]) - sdsp["size"] = strconv.Itoa(len(file.Payload)) + sdsp["pkt"] = ToBase32(file.Hash[:]) + sdsp["size"] = len(file.Payload) dirToSync := filepath.Join( state.Ctx.Spool, state.Node.Id.String(), @@ -856,31 +850,33 @@ os.O_RDWR|os.O_CREATE, os.FileMode(0666), ) if err != nil { - state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "") + state.Ctx.LogE("sp-file", sdsp, err, "") return nil, err } state.Ctx.LogD( "sp-file", - SdsAdd(sdsp, SDS{"offset": strconv.FormatInt(int64(file.Offset), 10)}), + SdsAdd(sdsp, SDS{"offset": file.Offset}), "seeking", ) 
if _, err = fd.Seek(int64(file.Offset), io.SeekStart); err != nil { - state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "") + state.Ctx.LogE("sp-file", sdsp, err, "") fd.Close() return nil, err } state.Ctx.LogD("sp-file", sdsp, "writing") _, err = fd.Write(file.Payload) if err != nil { - state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "") + state.Ctx.LogE("sp-file", sdsp, err, "") fd.Close() return nil, err } - ourSize := uint64(file.Offset) + uint64(len(file.Payload)) + ourSize := file.Offset + uint64(len(file.Payload)) state.RLock() - sdsp["fullsize"] = strconv.FormatInt(int64(state.infosTheir[*file.Hash].Size), 10) - sdsp["size"] = strconv.FormatInt(int64(ourSize), 10) - state.Ctx.LogP("sp-file", sdsp, "") + sdsp["size"] = int64(ourSize) + sdsp["fullsize"] = int64(state.infosTheir[*file.Hash].Size) + if state.Ctx.ShowPrgrs { + Progress(sdsp) + } if state.infosTheir[*file.Hash].Size != ourSize { state.RUnlock() fd.Close() @@ -891,7 +887,7 @@ spWorkersGroup.Wait() spWorkersGroup.Add(1) go func() { if err := fd.Sync(); err != nil { - state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "sync") + state.Ctx.LogE("sp-file", sdsp, err, "sync") fd.Close() return } @@ -899,19 +895,19 @@ state.wg.Add(1) defer state.wg.Done() fd.Seek(0, io.SeekStart) state.Ctx.LogD("sp-file", sdsp, "checking") - gut, err := Check(fd, file.Hash[:]) + gut, err := Check(fd, file.Hash[:], sdsp, state.Ctx.ShowPrgrs) fd.Close() if err != nil || !gut { - state.Ctx.LogE("sp-file", sdsp, "checksum mismatch") + state.Ctx.LogE("sp-file", sdsp, errors.New("checksum mismatch"), "") return } state.Ctx.LogI("sp-done", SdsAdd(sdsp, SDS{"xx": string(TRx)}), "") if err = os.Rename(filePath+PartSuffix, filePath); err != nil { - state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "rename") + state.Ctx.LogE("sp-file", sdsp, err, "rename") return } if err = DirSync(dirToSync); err != nil { - state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "sync") + state.Ctx.LogE("sp-file", 
sdsp, err, "sync") return } state.Lock() @@ -927,13 +923,10 @@ sdsp := SdsAdd(sds, SDS{"type": "done"}) state.Ctx.LogD("sp-process", sdsp, "unmarshaling packet") var done SPDone if _, err = xdr.Unmarshal(r, &done); err != nil { - state.Ctx.LogE("sp-process", SdsAdd(sds, SDS{ - "type": "done", - "err": err, - }), "") + state.Ctx.LogE("sp-process", SdsAdd(sds, SDS{"type": "done"}), err, "") return nil, err } - sdsp["hash"] = ToBase32(done.Hash[:]) + sdsp["pkt"] = ToBase32(done.Hash[:]) state.Ctx.LogD("sp-done", sdsp, "removing") err := os.Remove(filepath.Join( state.Ctx.Spool, @@ -945,18 +938,18 @@ sdsp["xx"] = string(TTx) if err == nil { state.Ctx.LogI("sp-done", sdsp, "") } else { - state.Ctx.LogE("sp-done", sdsp, "") + state.Ctx.LogE("sp-done", sdsp, err, "") } case SPTypeFreq: sdsp := SdsAdd(sds, SDS{"type": "freq"}) state.Ctx.LogD("sp-process", sdsp, "unmarshaling packet") var freq SPFreq if _, err = xdr.Unmarshal(r, &freq); err != nil { - state.Ctx.LogE("sp-process", SdsAdd(sdsp, SDS{"err": err}), "") + state.Ctx.LogE("sp-process", sdsp, err, "") return nil, err } - sdsp["hash"] = ToBase32(freq.Hash[:]) - sdsp["offset"] = strconv.FormatInt(int64(freq.Offset), 10) + sdsp["pkt"] = ToBase32(freq.Hash[:]) + sdsp["offset"] = freq.Offset state.Ctx.LogD("sp-process", sdsp, "queueing") nice, exists := state.infosOurSeen[*freq.Hash] if exists { @@ -988,7 +981,8 @@ default: state.Ctx.LogE( "sp-process", SdsAdd(sds, SDS{"type": head.Type}), - "unknown", + errors.New("unknown type"), + "", ) return nil, BadPktType } @@ -1005,8 +999,8 @@ state.RUnlock() state.Ctx.LogI("sp-infos", SDS{ "xx": string(TRx), "node": state.Node.Id, - "pkts": strconv.Itoa(pkts), - "size": strconv.FormatInt(int64(size), 10), + "pkts": pkts, + "size": int64(size), }, "") } return payloadsSplit(replies), nil diff --git a/src/toss.go b/src/toss.go index bf8e4edd44bf0f9d3062b181d428f6d4f936019692a5cd7efc433262bafdf397..998dca2ca932e065198c92ec5e7dd5b925785498539a18d7242145ce5d78e55f 100644 --- 
a/src/toss.go +++ b/src/toss.go @@ -21,6 +21,7 @@ import ( "bufio" "bytes" "encoding/base64" + "errors" "fmt" "io" "io/ioutil" @@ -78,9 +79,7 @@ for job := range ctx.Jobs(nodeId, TRx) { pktName := filepath.Base(job.Fd.Name()) sds := SDS{"node": job.PktEnc.Sender, "pkt": pktName} if job.PktEnc.Nice > nice { - ctx.LogD("rx", SdsAdd(sds, SDS{ - "nice": strconv.Itoa(int(job.PktEnc.Nice)), - }), "too nice") + ctx.LogD("rx", SdsAdd(sds, SDS{"nice": int(job.PktEnc.Nice)}), "too nice") continue } pipeR, pipeW := io.Pipe() @@ -98,7 +97,7 @@ pipeWB.Flush() pipeW.Close() job.Fd.Close() if err != nil { - ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "decryption") + ctx.LogE("rx", sds, err, "decryption") } }(job) var pkt Pkt @@ -106,7 +105,7 @@ var err error var pktSize int64 var pktSizeBlocks int64 if _, err = xdr.Unmarshal(pipeR, &pkt); err != nil { - ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "unmarshal") + ctx.LogE("rx", sds, err, "unmarshal") isBad = true goto Closing } @@ -116,7 +115,7 @@ if pktSize%(EncBlkSize+poly1305.TagSize) != 0 { pktSize -= poly1305.TagSize } pktSize -= pktSizeBlocks * poly1305.TagSize - sds["size"] = strconv.FormatInt(pktSize, 10) + sds["size"] = pktSize ctx.LogD("rx", sds, "taken") switch pkt.Type { case PktTypeExec: @@ -137,7 +136,7 @@ }) sender := ctx.Neigh[*job.PktEnc.Sender] cmdline, exists := sender.Exec[handle] if !exists || len(cmdline) == 0 { - ctx.LogE("rx", SdsAdd(sds, SDS{"err": "No handle found"}), "") + ctx.LogE("rx", sds, errors.New("No handle found"), "") isBad = true goto Closing } @@ -158,7 +157,7 @@ ) cmd.Stdin = decompressor output, err := cmd.Output() if err != nil { - ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "handle") + ctx.LogE("rx", sds, err, "handle") isBad = true goto Closing } @@ -187,7 +186,7 @@ fd.Close() } } if err = os.Remove(job.Fd.Name()); err != nil { - ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "remove") + ctx.LogE("rx", sds, err, "remove") isBad = true } } @@ -198,46 +197,51 @@ } dst := 
string(pkt.Path[:int(pkt.PathLen)]) sds := SdsAdd(sds, SDS{"type": "file", "dst": dst}) if filepath.IsAbs(dst) { - ctx.LogE("rx", sds, "non-relative destination path") + ctx.LogE("rx", sds, errors.New("non-relative destination path"), "") isBad = true goto Closing } incoming := ctx.Neigh[*job.PktEnc.Sender].Incoming if incoming == nil { - ctx.LogE("rx", sds, "incoming is not allowed") + ctx.LogE("rx", sds, errors.New("incoming is not allowed"), "") isBad = true goto Closing } dir := filepath.Join(*incoming, path.Dir(dst)) if err = os.MkdirAll(dir, os.FileMode(0777)); err != nil { - ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "mkdir") + ctx.LogE("rx", sds, err, "mkdir") isBad = true goto Closing } if !dryRun { tmp, err := TempFile(dir, "file") if err != nil { - ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "mktemp") + ctx.LogE("rx", sds, err, "mktemp") isBad = true goto Closing } sds["tmp"] = tmp.Name() ctx.LogD("rx", sds, "created") bufW := bufio.NewWriter(tmp) - if _, err = io.Copy(bufW, pipeR); err != nil { - ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "copy") + if _, err = CopyProgressed( + bufW, + pipeR, + SdsAdd(sds, SDS{"fullsize": sds["size"]}), + ctx.ShowPrgrs, + ); err != nil { + ctx.LogE("rx", sds, err, "copy") isBad = true goto Closing } if err = bufW.Flush(); err != nil { tmp.Close() - ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "copy") + ctx.LogE("rx", sds, err, "copy") isBad = true goto Closing } if err = tmp.Sync(); err != nil { tmp.Close() - ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "copy") + ctx.LogE("rx", sds, err, "copy") isBad = true goto Closing } @@ -250,7 +254,7 @@ if _, err = os.Stat(dstPath); err != nil { if os.IsNotExist(err) { break } - ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "stat") + ctx.LogE("rx", sds, err, "stat") isBad = true goto Closing } @@ -258,11 +262,11 @@ dstPath = dstPathOrig + "." 
+ strconv.Itoa(dstPathCtr) dstPathCtr++ } if err = os.Rename(tmp.Name(), dstPath); err != nil { - ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "rename") + ctx.LogE("rx", sds, err, "rename") isBad = true } if err = DirSync(*incoming); err != nil { - ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "sync") + ctx.LogE("rx", sds, err, "sync") isBad = true } delete(sds, "tmp") @@ -275,7 +279,7 @@ fd.Close() } } if err = os.Remove(job.Fd.Name()); err != nil { - ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "remove") + ctx.LogE("rx", sds, err, "remove") isBad = true } if len(sendmail) > 0 && ctx.NotifyFile != nil { @@ -298,14 +302,14 @@ goto Closing } src := string(pkt.Path[:int(pkt.PathLen)]) if filepath.IsAbs(src) { - ctx.LogE("rx", sds, "non-relative source path") + ctx.LogE("rx", sds, errors.New("non-relative source path"), "") isBad = true goto Closing } sds := SdsAdd(sds, SDS{"type": "freq", "src": src}) dstRaw, err := ioutil.ReadAll(pipeR) if err != nil { - ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "read") + ctx.LogE("rx", sds, err, "read") isBad = true goto Closing } @@ -314,7 +318,7 @@ sds["dst"] = dst sender := ctx.Neigh[*job.PktEnc.Sender] freqPath := sender.FreqPath if freqPath == nil { - ctx.LogE("rx", sds, "freqing is not allowed") + ctx.LogE("rx", sds, errors.New("freqing is not allowed"), "") isBad = true goto Closing } @@ -329,7 +333,7 @@ sender.FreqMinSize, sender.FreqMaxSize, ) if err != nil { - ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "tx file") + ctx.LogE("rx", sds, err, "tx file") isBad = true goto Closing } @@ -342,7 +346,7 @@ fd.Close() } } if err = os.Remove(job.Fd.Name()); err != nil { - ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "remove") + ctx.LogE("rx", sds, err, "remove") isBad = true } if len(sendmail) > 0 && ctx.NotifyFreq != nil { @@ -366,14 +370,14 @@ nodeId := NodeId(*dst) node, known := ctx.Neigh[nodeId] sds := SdsAdd(sds, SDS{"type": "trns", "dst": nodeId}) if !known { - ctx.LogE("rx", sds, "unknown node") + ctx.LogE("rx", sds, 
errors.New("unknown node"), "") isBad = true goto Closing } ctx.LogD("rx", sds, "taken") if !dryRun { if err = ctx.TxTrns(node, job.PktEnc.Nice, pktSize, pipeR); err != nil { - ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "tx trns") + ctx.LogE("rx", sds, err, "tx trns") isBad = true goto Closing } @@ -386,12 +390,12 @@ fd.Close() } } if err = os.Remove(job.Fd.Name()); err != nil { - ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "remove") + ctx.LogE("rx", sds, err, "remove") isBad = true } } default: - ctx.LogE("rx", sds, "unknown type") + ctx.LogE("rx", sds, errors.New("unknown type"), "") isBad = true } Closing: diff --git a/src/tx.go b/src/tx.go index 72e0868a171c8b66cb45d2fa775c72b137cbb5ceddf6eb69c82bcd3dcb0fb352..aa293d2cd76645d7022b026de069f085bcd3a0a0bf9c5dc2a174330c6f6bad5f 100644 --- a/src/tx.go +++ b/src/tx.go @@ -51,6 +51,7 @@ pkt *Pkt, nice uint8, size, minSize int64, src io.Reader, + pktName string, ) (*Node, error) { hops := make([]*Node, 0, 1+len(node.Via)) hops = append(hops, node) @@ -81,8 +82,8 @@ pipeR, pipeW := io.Pipe() go func(size int64, src io.Reader, dst io.WriteCloser) { ctx.LogD("tx", SDS{ "node": hops[0].Id, - "nice": strconv.Itoa(int(nice)), - "size": strconv.FormatInt(size, 10), + "nice": int(nice), + "size": size, }, "wrote") errs <- PktEncWrite(ctx.Self, hops[0], pkt, nice, size, padSize, src, dst) dst.Close() @@ -97,8 +98,8 @@ pipeR, pipeW = io.Pipe() go func(node *Node, pkt *Pkt, size int64, src io.Reader, dst io.WriteCloser) { ctx.LogD("tx", SDS{ "node": node.Id, - "nice": strconv.Itoa(int(nice)), - "size": strconv.FormatInt(size, 10), + "nice": int(nice), + "size": size, }, "trns wrote") errs <- PktEncWrite(ctx.Self, node, pkt, nice, size, 0, src, dst) dst.Close() @@ -106,7 +107,11 @@ }(hops[i], pktTrns, curSize, pipeRPrev, pipeW) curSize = PktEncOverhead + PktSizeOverhead + sizeWithTags(PktOverhead+curSize) } go func() { - _, err := io.Copy(tmp.W, pipeR) + _, err := CopyProgressed( + tmp.W, pipeR, + SDS{"xx": string(TTx), "pkt": 
pktName, "fullsize": curSize}, + ctx.ShowPrgrs, + ) errs <- err }() for i := 0; i <= len(hops); i++ { @@ -320,19 +325,19 @@ pkt, err := NewPkt(PktTypeFile, nice, []byte(dstPath)) if err != nil { return err } - _, err = ctx.Tx(node, pkt, nice, fileSize, minSize, reader) + _, err = ctx.Tx(node, pkt, nice, fileSize, minSize, reader, dstPath) sds := SDS{ "type": "file", "node": node.Id, - "nice": strconv.Itoa(int(nice)), + "nice": int(nice), "src": srcPath, "dst": dstPath, - "size": strconv.FormatInt(fileSize, 10), + "size": fileSize, } if err == nil { ctx.LogI("tx", sds, "sent") } else { - ctx.LogE("tx", SdsAdd(sds, SDS{"err": err}), "sent") + ctx.LogE("tx", sds, err, "sent") } return err } @@ -375,19 +380,20 @@ nice, sizeToSend, minSize, io.TeeReader(reader, hsh), + path, ) sds := SDS{ "type": "file", "node": node.Id, - "nice": strconv.Itoa(int(nice)), + "nice": int(nice), "src": srcPath, "dst": path, - "size": strconv.FormatInt(sizeToSend, 10), + "size": sizeToSend, } if err == nil { ctx.LogI("tx", sds, "sent") } else { - ctx.LogE("tx", SdsAdd(sds, SDS{"err": err}), "sent") + ctx.LogE("tx", sds, err, "sent") return err } hsh.Sum(metaPkt.Checksums[chunkNum][:0]) @@ -408,19 +414,19 @@ if err != nil { return err } metaPktSize := int64(metaBuf.Len()) - _, err = ctx.Tx(node, pkt, nice, metaPktSize, minSize, &metaBuf) + _, err = ctx.Tx(node, pkt, nice, metaPktSize, minSize, &metaBuf, path) sds := SDS{ "type": "file", "node": node.Id, - "nice": strconv.Itoa(int(nice)), + "nice": int(nice), "src": srcPath, "dst": path, - "size": strconv.FormatInt(metaPktSize, 10), + "size": metaPktSize, } if err == nil { ctx.LogI("tx", sds, "sent") } else { - ctx.LogE("tx", SdsAdd(sds, SDS{"err": err}), "sent") + ctx.LogE("tx", sds, err, "sent") } return err } @@ -444,19 +450,19 @@ return err } src := strings.NewReader(dstPath) size := int64(src.Len()) - _, err = ctx.Tx(node, pkt, nice, size, minSize, src) + _, err = ctx.Tx(node, pkt, nice, size, minSize, src, srcPath) sds := SDS{ "type": 
"freq", "node": node.Id, - "nice": strconv.Itoa(int(nice)), - "replynice": strconv.Itoa(int(replyNice)), + "nice": int(nice), + "replynice": int(replyNice), "src": srcPath, "dst": dstPath, } if err == nil { ctx.LogI("tx", sds, "sent") } else { - ctx.LogE("tx", SdsAdd(sds, SDS{"err": err}), "sent") + ctx.LogE("tx", sds, err, "sent") } return err } @@ -492,19 +498,19 @@ if err != nil { return err } size := int64(compressed.Len()) - _, err = ctx.Tx(node, pkt, nice, size, minSize, &compressed) + _, err = ctx.Tx(node, pkt, nice, size, minSize, &compressed, handle) sds := SDS{ "type": "exec", "node": node.Id, - "nice": strconv.Itoa(int(nice)), - "replynice": strconv.Itoa(int(replyNice)), + "nice": int(nice), + "replynice": int(replyNice), "dst": strings.Join(append([]string{handle}, args...), " "), - "size": strconv.FormatInt(size, 10), + "size": size, } if err == nil { ctx.LogI("tx", sds, "sent") } else { - ctx.LogE("tx", SdsAdd(sds, SDS{"err": err}), "sent") + ctx.LogE("tx", sds, err, "sent") } return err } @@ -513,20 +519,24 @@ func (ctx *Ctx) TxTrns(node *Node, nice uint8, size int64, src io.Reader) error { sds := SDS{ "type": "trns", "node": node.Id, - "nice": strconv.Itoa(int(nice)), - "size": strconv.FormatInt(size, 10), + "nice": int(nice), + "size": size, } ctx.LogD("tx", sds, "taken") if !ctx.IsEnoughSpace(size) { err := errors.New("is not enough space") - ctx.LogE("tx", SdsAdd(sds, SDS{"err": err}), err.Error()) + ctx.LogE("tx", sds, err, err.Error()) return err } tmp, err := ctx.NewTmpFileWHash() if err != nil { return err } - if _, err = io.Copy(tmp.W, src); err != nil { + if _, err = CopyProgressed(tmp.W, src, SDS{ + "xx": string(TTx), + "pkt": node.Id.String(), + "fullsize": size, + }, ctx.ShowPrgrs); err != nil { return err } nodePath := filepath.Join(ctx.Spool, node.Id.String()) diff --git a/src/tx_test.go b/src/tx_test.go index 
1749d1dd70738450fee6ca2eac066afa555b55c191b13b7a9fa2723f322ba71d..4827ad3f2dc657f1f0d0ec50b41a6f6b1a16700c66e47219b5fb3ca3772ac703 100644 --- a/src/tx_test.go +++ b/src/tx_test.go @@ -84,6 +84,7 @@ 123, int64(src.Len()), int64(padSize), src, + "pktName", ) if err != nil { return false diff --git a/src/uilive/LICENSE b/src/uilive/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..9e8cf7d80722a04b466207b890302621be3cad009b0dd8822a3d8f9a3f8bc62a --- /dev/null +++ b/src/uilive/LICENSE @@ -0,0 +1,10 @@ +MIT License +=========== + +Copyright (c) 2015, Greg Osuri + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/src/uilive/README.md b/src/uilive/README.md new file mode 100644 index 0000000000000000000000000000000000000000..13ceee5dbc11a26741765f265319e92d6d84305e8ab673e5fc9118df86e44717 --- /dev/null +++ b/src/uilive/README.md @@ -0,0 +1,31 @@ +# uilive [![GoDoc](https://godoc.org/github.com/gosuri/uilive?status.svg)](https://godoc.org/github.com/gosuri/uilive) [![Build Status](https://travis-ci.org/gosuri/uilive.svg?branch=master)](https://travis-ci.org/gosuri/uilive) + +uilive is a go library for updating terminal output in realtime. It provides a buffered [io.Writer](https://golang.org/pkg/io/#Writer) that is flushed at a timed interval. uilive powers [uiprogress](https://github.com/gosuri/uiprogress). + +## Usage Example + +Calling `uilive.New()` will create a new writer. To start rendering, simply call `writer.Start()` and update the ui by writing to the `writer`. Full source for the below example is in [example/main.go](example/main.go). + +```go +writer := uilive.New() +// start listening for updates and render +writer.Start() + +for i := 0; i <= 100; i++ { + fmt.Fprintf(writer, "Downloading.. (%d/%d) GB\n", i, 100) + time.Sleep(time.Millisecond * 5) +} + +fmt.Fprintln(writer, "Finished: Downloaded 100GB") +writer.Stop() // flush and stop rendering +``` + +The above will render + +![example](doc/example.gif) + +## Installation + +```sh +$ go get -v github.com/gosuri/uilive +``` diff --git a/src/uilive/doc.go b/src/uilive/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..308171fb2eb80c72b395cdabb55c2e7932edb1a2e5fa903be156385f3dfb1269 --- /dev/null +++ b/src/uilive/doc.go @@ -0,0 +1,2 @@ +// Package uilive provides a writer that live updates the terminal. It provides a buffered io.Writer that is flushed at a timed interval. 
+package uilive diff --git a/src/uilive/terminal_size.go b/src/uilive/terminal_size.go new file mode 100644 index 0000000000000000000000000000000000000000..1ee4526023275659efced90d7f6c055928ce9b10166144e0adae7e66eb330d14 --- /dev/null +++ b/src/uilive/terminal_size.go @@ -0,0 +1,37 @@ +// +build !windows + +package uilive + +import ( + "os" + "runtime" + "syscall" + "unsafe" +) + +type windowSize struct { + rows uint16 + cols uint16 +} + +var out *os.File +var err error +var sz windowSize + +func getTermSize() (int, int) { + if runtime.GOOS == "openbsd" { + out, err = os.OpenFile("/dev/tty", os.O_RDWR, 0) + if err != nil { + return 0, 0 + } + + } else { + out, err = os.OpenFile("/dev/tty", os.O_WRONLY, 0) + if err != nil { + return 0, 0 + } + } + _, _, _ = syscall.Syscall(syscall.SYS_IOCTL, + out.Fd(), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&sz))) + return int(sz.cols), int(sz.rows) +} diff --git a/src/uilive/writer.go b/src/uilive/writer.go new file mode 100644 index 0000000000000000000000000000000000000000..f6051461a9b176b8d264957f1489cbcefdb6469fb46ebf26a1473c8cb4733bf3 --- /dev/null +++ b/src/uilive/writer.go @@ -0,0 +1,144 @@ +// This is a fork of github.com/gosuri/uilive for NNCP project +// * It does not buffer all the writes, but resets the buffer +// just only for single latest line. Some terminals have +// huge CPU usage if so much data (as copied files progress) +// is printed +// * By default it uses stderr +// * By default it uses 10ms refresh period +// * defer-s are removed for less CPU usage +// * By default it uses stderr +// * By default it uses stderr +// * By default it uses stderr +// * Removed newline/bypass related code. 
No Windows support + +package uilive + +import ( + "bytes" + "fmt" + "io" + "os" + "sync" + "time" +) + +// ESC is the ASCII code for escape character +const ESC = 27 + +// RefreshInterval is the default refresh interval to update the ui +var RefreshInterval = 10 * time.Millisecond + +var overFlowHandled bool + +var termWidth int + +// Out is the default output writer for the Writer +var Out = os.Stdout + +// FdWriter is a writer with a file descriptor. +type FdWriter interface { + io.Writer + Fd() uintptr +} + +// Writer is a buffered writer that updates the terminal. The contents of writer will be flushed on a timed interval or when Flush is called. +type Writer struct { + // Out is the writer to write to + Out io.Writer + + // RefreshInterval is the time the UI should refresh + RefreshInterval time.Duration + + ticker *time.Ticker + tdone chan struct{} + + buf bytes.Buffer + mtx *sync.Mutex +} + +// New returns a new Writer with defaults +func New() *Writer { + termWidth, _ = getTermSize() + if termWidth != 0 { + overFlowHandled = true + } + return &Writer{ + Out: Out, + RefreshInterval: RefreshInterval, + mtx: &sync.Mutex{}, + } +} + +// clear the line and move the cursor up +var clear = fmt.Sprintf("%c[%dA%c[2K", ESC, 1, ESC) + +func (w *Writer) clearLines() { + fmt.Fprint(w.Out, clear) +} + +// Flush writes to the output and resets the buffer. It should be called after the last call to Write to ensure that any data buffered in the Writer is written to output. +// Any incomplete escape sequence at the end is considered complete for formatting purposes.
+// An error is returned if the contents of the buffer cannot be written to the underlying output stream +func (w *Writer) Flush() (err error) { + w.mtx.Lock() + // do nothing if buffer is empty + if len(w.buf.Bytes()) == 0 { + w.mtx.Unlock() + return + } + w.clearLines() + var currentLine bytes.Buffer + for _, b := range w.buf.Bytes() { + if b == '\n' { + currentLine.Reset() + } else { + currentLine.Write([]byte{b}) + if overFlowHandled && currentLine.Len() > termWidth { + currentLine.Reset() + } + } + } + _, err = w.Out.Write(w.buf.Bytes()) + w.mtx.Unlock() + return +} + +// Start starts the listener in a non-blocking manner +func (w *Writer) Start() { + w.ticker = time.NewTicker(w.RefreshInterval) + w.tdone = make(chan struct{}, 0) + w.Out.Write([]byte("\n")) + go w.Listen() +} + +// Stop stops the listener that updates the terminal +func (w *Writer) Stop() { + w.Flush() + close(w.tdone) +} + +// Listen listens for updates to the writer's buffer and flushes to the out provided. It blocks the calling goroutine. +func (w *Writer) Listen() { + for { + select { + case <-w.ticker.C: + if w.ticker != nil { + w.Flush() + } + case <-w.tdone: + w.mtx.Lock() + w.ticker.Stop() + w.mtx.Unlock() + return + } + } +} + +// Write saves the contents of buf to the writer w. The only errors returned are ones encountered while writing to the underlying buffer. +func (w *Writer) Write(buf []byte) (n int, err error) { + w.mtx.Lock() + w.buf.Reset() + n, err = w.buf.Write(buf) + w.mtx.Unlock() + return +}