Sergey Matveev's repositories - btrtrc.git/commitdiff
Add linter CI (#542)
authorBora M. Alper <bora@boramalper.org>
Mon, 16 Aug 2021 01:11:31 +0000 (04:11 +0300)
committerGitHub <noreply@github.com>
Mon, 16 Aug 2021 01:11:31 +0000 (11:11 +1000)
* Add linter CI

Signed-off-by: Bora M. Alper <bora@boramalper.org>
* Make gosec CI ignore SHA1 and upload sarif

Signed-off-by: Bora M. Alper <bora@boramalper.org>
* Fix formatting of source files

Signed-off-by: Bora M. Alper <bora@boramalper.org>
* Make go vet ignore unkeyed composite literals and fix other warnings

Signed-off-by: Bora M. Alper <bora@boramalper.org>
* Make staticcheck ignore unused methods and fix other warnings

Signed-off-by: Bora M. Alper <bora@boramalper.org>
* Use golangci-lint

Signed-off-by: Bora M. Alper <bora@boramalper.org>
12 files changed:
.github/workflows/linter.yml [new file with mode: 0644]
.golangci.yml [new file with mode: 0644]
bencode/decode.go
client.go
peer_protocol/messagetype_string.go
peer_protocol/protocol.go
pex_test.go
requesting.go
storage/sqlite/sqlite-storage.go
torrent.go
tracker/udp/conn-client.go
util/dirwatch/dirwatch.go

diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml
new file mode 100644 (file)
index 0000000..397e68d
--- /dev/null
@@ -0,0 +1,37 @@
+name: Linter
+
+on:
+  push:
+    branches: [ master ]
+  pull_request:
+    branches: [ master ]
+
+jobs:
+  golint:
+    name: Lint
+    runs-on: ubuntu-latest
+    env:
+      GO111MODULE: on
+    steps:
+      - uses: actions/checkout@v2
+      - uses: golangci/golangci-lint-action@v2
+        with:
+          version: latest
+
+          # Optional: working directory, useful for monorepos
+          # working-directory: somedir
+
+          # Optional: golangci-lint command line arguments.
+          # args: --issues-exit-code=0
+
+          # Optional: show only new issues if it's a pull request. The default value is `false`.
+          only-new-issues: true
+
+          # Optional: if set to true then the action will use pre-installed Go.
+          skip-go-installation: true
+
+          # Optional: if set to true then the action don't cache or restore ~/go/pkg.
+          # skip-pkg-cache: true
+
+          # Optional: if set to true then the action don't cache or restore ~/.cache/go-build.
+          # skip-build-cache: true
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644 (file)
index 0000000..3a38b69
--- /dev/null
@@ -0,0 +1,9 @@
+linters-settings:
+  staticcheck:
+    # Select the Go version to target. The default is '1.13'.
+    go: "1.15"
+    checks: ["all", "-U1000"]
+
+  govet:
+    disable:
+      - composites
index 5dbcbe5a831f253e6dd79e561487e1553672923d..08d7861c7e6a03cadc766e61bc700b86edf64cab 100644 (file)
@@ -672,7 +672,7 @@ func (d *Decoder) parseListInterface() interface{} {
                list = append(list, valuei)
        }
        if list == nil {
-               list = make([]interface{}, 0, 0)
+               list = make([]interface{}, 0)
        }
        return list
 }
index 26951e4815d9618975b9fec84be1ee89670c28a0..4815a0cd4561edb27f18d1d147d4a07e0d3b58da 100644 (file)
--- a/client.go
+++ b/client.go
@@ -916,11 +916,11 @@ func (cl *Client) runHandshookConn(c *PeerConn, t *Torrent) error {
                        connsToSelf.Add(1)
                        addr := c.conn.RemoteAddr().String()
                        cl.dopplegangerAddrs[addr] = struct{}{}
-               } else {
+               } /* else {
                        // Because the remote address is not necessarily the same as its client's torrent listen
                        // address, we won't record the remote address as a doppleganger. Instead, the initiator
                        // can record *us* as the doppleganger.
-               }
+               } */
                return errors.New("local and remote peer ids are the same")
        }
        c.conn.SetWriteDeadline(time.Time{})
index b543cd52a7fa8128be54ebfa597e195b3be56da7..7be19f4275b8a286084bf33564ebb4f44db73d92 100644 (file)
@@ -17,7 +17,7 @@ var (
 
 func (i MessageType) String() string {
        switch {
-       case 0 <= i && i <= 9:
+       case i <= 9:
                return _MessageType_name_0[_MessageType_index_0[i]:_MessageType_index_0[i+1]]
        case 13 <= i && i <= 17:
                i -= 13
index 2086063f91102a5fc21997167de268e3ea76bd07..05c6657a8375971a00d2a1cd04ad7f0ff3e8a9ac 100644 (file)
@@ -42,6 +42,6 @@ const (
        HandshakeExtendedID = 0
 
        RequestMetadataExtensionMsgType ExtendedMetadataRequestMsgType = 0
-       DataMetadataExtensionMsgType                                   = 1
-       RejectMetadataExtensionMsgType                                 = 2
+       DataMetadataExtensionMsgType    ExtendedMetadataRequestMsgType = 1
+       RejectMetadataExtensionMsgType  ExtendedMetadataRequestMsgType = 2
 )
index 5b27683fd626edcc2659b899c0b4f3e7f759c257..54866596a48445c024f66e0da0adaad86089b4b2 100644 (file)
@@ -39,7 +39,7 @@ func TestPexAdded(t *testing.T) {
                s.Add(&PeerConn{Peer: Peer{RemoteAddr: addrs[0], outgoing: true}})
                targ := &pexState{
                        ev: []pexEvent{
-                               pexEvent{pexAdd, addrs[0], pp.PexOutgoingConn},
+                               {pexAdd, addrs[0], pp.PexOutgoingConn},
                        },
                        nc: 1,
                }
@@ -48,17 +48,17 @@ func TestPexAdded(t *testing.T) {
        t.Run("belowTarg", func(t *testing.T) {
                s := &pexState{
                        hold: []pexEvent{
-                               pexEvent{pexDrop, addrs[1], 0},
+                               {pexDrop, addrs[1], 0},
                        },
                        nc: 0,
                }
                s.Add(&PeerConn{Peer: Peer{RemoteAddr: addrs[0]}})
                targ := &pexState{
                        hold: []pexEvent{
-                               pexEvent{pexDrop, addrs[1], 0},
+                               {pexDrop, addrs[1], 0},
                        },
                        ev: []pexEvent{
-                               pexEvent{pexAdd, addrs[0], 0},
+                               {pexAdd, addrs[0], 0},
                        },
                        nc: 1,
                }
@@ -68,7 +68,7 @@ func TestPexAdded(t *testing.T) {
                holdAddr := &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
                s := &pexState{
                        hold: []pexEvent{
-                               pexEvent{pexDrop, holdAddr, 0},
+                               {pexDrop, holdAddr, 0},
                        },
                        nc: pexTargAdded,
                }
@@ -76,8 +76,8 @@ func TestPexAdded(t *testing.T) {
                targ := &pexState{
                        hold: []pexEvent{},
                        ev: []pexEvent{
-                               pexEvent{pexDrop, holdAddr, 0},
-                               pexEvent{pexAdd, addrs[0], 0},
+                               {pexDrop, holdAddr, 0},
+                               {pexAdd, addrs[0], 0},
                        },
                        nc: pexTargAdded + 1,
                }
@@ -90,7 +90,7 @@ func TestPexDropped(t *testing.T) {
                s := &pexState{nc: 1}
                s.Drop(&PeerConn{Peer: Peer{RemoteAddr: addrs[0]}, pex: pexConnState{Listed: true}})
                targ := &pexState{
-                       hold: []pexEvent{pexEvent{pexDrop, addrs[0], 0}},
+                       hold: []pexEvent{{pexDrop, addrs[0], 0}},
                        nc:   0,
                }
                require.EqualValues(t, targ, s)
@@ -99,7 +99,7 @@ func TestPexDropped(t *testing.T) {
                s := &pexState{nc: pexTargAdded + 1}
                s.Drop(&PeerConn{Peer: Peer{RemoteAddr: addrs[0]}, pex: pexConnState{Listed: true}})
                targ := &pexState{
-                       ev: []pexEvent{pexEvent{pexDrop, addrs[0], 0}},
+                       ev: []pexEvent{{pexDrop, addrs[0], 0}},
                        nc: pexTargAdded,
                }
                require.EqualValues(t, targ, s)
@@ -114,8 +114,8 @@ func TestPexDropped(t *testing.T) {
 
 func TestPexReset(t *testing.T) {
        s := &pexState{
-               hold: []pexEvent{pexEvent{pexDrop, addrs[0], 0}},
-               ev:   []pexEvent{pexEvent{pexAdd, addrs[1], 0}},
+               hold: []pexEvent{{pexDrop, addrs[0], 0}},
+               ev:   []pexEvent{{pexAdd, addrs[1], 0}},
                nc:   1,
        }
        s.Reset()
@@ -149,10 +149,10 @@ var testcases = []struct {
                name: "add4",
                in: &pexState{
                        ev: []pexEvent{
-                               pexEvent{pexAdd, addrs[0], f},
-                               pexEvent{pexAdd, addrs[1], f},
-                               pexEvent{pexAdd, addrs[2], f},
-                               pexEvent{pexAdd, addrs[3], f},
+                               {pexAdd, addrs[0], f},
+                               {pexAdd, addrs[1], f},
+                               {pexAdd, addrs[2], f},
+                               {pexAdd, addrs[3], f},
                        },
                },
                arg: 0,
@@ -175,8 +175,8 @@ var testcases = []struct {
                arg:  0,
                in: &pexState{
                        ev: []pexEvent{
-                               pexEvent{pexDrop, addrs[0], f},
-                               pexEvent{pexDrop, addrs[2], f},
+                               {pexDrop, addrs[0], f},
+                               {pexDrop, addrs[2], f},
                        },
                },
                targM: pp.PexMsg{
@@ -194,9 +194,9 @@ var testcases = []struct {
                arg:  0,
                in: &pexState{
                        ev: []pexEvent{
-                               pexEvent{pexAdd, addrs[0], f},
-                               pexEvent{pexAdd, addrs[1], f},
-                               pexEvent{pexDrop, addrs[0], f},
+                               {pexAdd, addrs[0], f},
+                               {pexAdd, addrs[1], f},
+                               {pexDrop, addrs[0], f},
                        },
                },
                targM: pp.PexMsg{
@@ -212,14 +212,14 @@ var testcases = []struct {
                arg:  0,
                in: &pexState{
                        ev: []pexEvent{
-                               pexEvent{pexAdd, addrs[0], f},
-                               pexEvent{pexAdd, addrs[1], f},
-                               pexEvent{pexAdd, addrs[2], f},
+                               {pexAdd, addrs[0], f},
+                               {pexAdd, addrs[1], f},
+                               {pexAdd, addrs[2], f},
                        },
                        hold: []pexEvent{
-                               pexEvent{pexDrop, addrs[0], f},
-                               pexEvent{pexDrop, addrs[2], f},
-                               pexEvent{pexDrop, addrs[1], f},
+                               {pexDrop, addrs[0], f},
+                               {pexDrop, addrs[2], f},
+                               {pexDrop, addrs[1], f},
                        },
                },
                targM: pp.PexMsg{
@@ -240,8 +240,8 @@ var testcases = []struct {
                arg:  1,
                in: &pexState{
                        ev: []pexEvent{
-                               pexEvent{pexAdd, addrs[0], f},
-                               pexEvent{pexAdd, addrs[1], f},
+                               {pexAdd, addrs[0], f},
+                               {pexAdd, addrs[1], f},
                        },
                },
                targM: pp.PexMsg{
index ed1c8d1558346fb139a09fa58ca4317857c1cf96..15033bb0bf55377fc692bbc09337233d0ee4aa8c 100644 (file)
@@ -127,9 +127,9 @@ func (p *Peer) applyNextRequestState() bool {
                more, err := p.request(req)
                if err != nil {
                        panic(err)
-               } else {
-                       //log.Print(req)
-               }
+               } /* else {
+                       log.Print(req)
+               } */
                if !more {
                        return false
                }
index 0d6cb2d73bf30d27e3f602d4b91b78b299645196..2cdf1e3c0ef65b5f6f26dc443264662dced4e62a 100644 (file)
@@ -137,10 +137,10 @@ func InitSchema(conn conn, pageSize int, triggers bool) error {
        }
        err = sqlitex.ExecScript(conn, `
                -- We have to opt into this before creating any tables, or before a vacuum to enable it. It means we
-               -- can trim the database file size with partial vacuums without having to do a full vacuum, which 
+               -- can trim the database file size with partial vacuums without having to do a full vacuum, which
                -- locks everything.
                pragma auto_vacuum=incremental;
-               
+
                create table if not exists blob (
                        name text,
                        last_used timestamp default (datetime('now')),
@@ -148,24 +148,24 @@ func InitSchema(conn conn, pageSize int, triggers bool) error {
                        verified bool,
                        primary key (name)
                );
-               
+
                create table if not exists blob_meta (
                        key text primary key,
                        value
                );
 
                create index if not exists blob_last_used on blob(last_used);
-               
-               -- While sqlite *seems* to be faster to get sum(length(data)) instead of 
-               -- sum(length(data)), it may still require a large table scan at start-up or with a 
+
+               -- While sqlite *seems* to be faster to get sum(length(data)) instead of
+               -- sum(length(data)), it may still require a large table scan at start-up or with a
                -- cold-cache. With this we can be assured that it doesn't.
                insert or ignore into blob_meta values ('size', 0);
-               
+
                create table if not exists setting (
                        name primary key on conflict replace,
                        value
                );
-       
+
                create view if not exists deletable_blob as
                with recursive excess (
                        usage_with,
@@ -173,9 +173,9 @@ func InitSchema(conn conn, pageSize int, triggers bool) error {
                        blob_rowid,
                        data_length
                ) as (
-                       select * 
+                       select *
                        from (
-                               select 
+                               select
                                        (select value from blob_meta where key='size') as usage_with,
                                        last_used,
                                        rowid,
@@ -184,7 +184,7 @@ func InitSchema(conn conn, pageSize int, triggers bool) error {
                        )
                        where usage_with > (select value from setting where name='capacity')
                        union all
-                       select 
+                       select
                                usage_with-data_length as new_usage_with,
                                blob.last_used,
                                blob.rowid,
@@ -206,14 +206,14 @@ func InitSchema(conn conn, pageSize int, triggers bool) error {
                                update blob_meta set value=value+length(cast(new.data as blob)) where key='size';
                                delete from blob where rowid in (select blob_rowid from deletable_blob);
                        end;
-                       
+
                        create trigger if not exists after_update_blob
                        after update of data on blob
                        begin
                                update blob_meta set value=value+length(cast(new.data as blob))-length(cast(old.data as blob)) where key='size';
                                delete from blob where rowid in (select blob_rowid from deletable_blob);
                        end;
-                       
+
                        create trigger if not exists after_delete_blob
                        after delete on blob
                        begin
@@ -368,7 +368,7 @@ func (me *poolFromConn) Close() error {
        return me.conn.Close()
 }
 
-func (poolFromConn) NumConns() int { return 1 }
+func (me *poolFromConn) NumConns() int { return 1 }
 
 type ProviderOpts struct {
        BatchWrites bool
index e7184c2a19b567b949aa7ee896f109ab00d34dd2..7d02539ac9f56b3b48244340215f7284884bc8fd 100644 (file)
@@ -1643,9 +1643,9 @@ func (t *Torrent) consumeDhtAnnouncePeers(pvs <-chan dht.PeersValues) {
                        }
                }
                cl.unlock()
-               if added != 0 {
-                       //log.Printf("added %v peers from dht for %v", added, t.InfoHash().HexString())
-               }
+               // if added != 0 {
+               //      log.Printf("added %v peers from dht for %v", added, t.InfoHash().HexString())
+               // }
        }
 }
 
index d789cc3bde643934b93f041fd587abbf91564474..ac920ed099d8aa4df2e4cd9edc6644b8dadb40c6 100644 (file)
@@ -32,10 +32,10 @@ func (cc *ConnClient) reader() {
                        cc.readErr = err
                        break
                }
-               err = cc.d.Dispatch(b[:n])
-               if err != nil {
-                       //log.Printf("dispatching packet received on %v (%q): %v", cc.conn, string(b[:n]), err)
-               }
+               _ = cc.d.Dispatch(b[:n])
+               // if err != nil {
+               //      log.Printf("dispatching packet received on %v (%q): %v", cc.conn, string(b[:n]), err)
+               // }
        }
 }
 
index dd0135b4d31a9fd7e1dcd8b826f61ce07988b080..94d22e3a54fb0b0351322371e5417d5e7673d44d 100644 (file)
@@ -202,7 +202,7 @@ func New(dirName string) (i *Instance, err error) {
                w:        w,
                dirName:  dirName,
                Events:   make(chan Event),
-               dirState: make(map[metainfo.Hash]entity, 0),
+               dirState: make(map[metainfo.Hash]entity),
                Logger:   log.Default,
        }
        go func() {