--- /dev/null
+$ go get go.stargrave.org/syncer
-# syncer
-
-Fast stateful file/disk data syncer.
+syncer -- fast stateful file/disk data syncer.
WARNING: I advise you to use ZFS and its native send/recv commands.
I abandoned this project because of that.
-## Description
-
The main purpose of this utility is fast data synchronization between
two hard drives: one is fast (SSD, SATA HDD), another is connected
through slow USB interface. Target is to lower data amounts needed to
be transferred: syncer keeps cryptographic hashes of source blocks in a
statefile and uses it to determine if we need to update block of data.
# sync from very fast SSD to slow USB connected HDD
- % ./syncer -src /dev/ada0 -dst /dev/da0 -state state.bin
+ $ syncer -src /dev/ada0 -dst /dev/da0 -state state.bin
[%%%%%%]
# all blocks were transferred to da0
Now we have statefile containing cryptographic hashes of the blocks from
source and copy of all read data in destination. Now if we run it again:
- % ./syncer -src /dev/ada0 -dst /dev/da0 -state state.bin
+ $ syncer -src /dev/ada0 -dst /dev/da0 -state state.bin
[....%.]
# only one block was transferred to da0
writes are sequential.
syncer is free software: see the file COPYING for copying conditions.
-
-## Installation
-
- % mkdir -p src
- % git clone git://git.cypherpunks.ru/syncer.git src/syncer
- % export GOPATH=$(pwd)
- % go get github.com/dchest/blake2b
- % go build syncer # syncer executable file should be in current directory
-
-## Statefile Format
-
- SRC_SIZE || BLK_SIZE || HASH0 || HASH1 || ...
-
-`SRC_SIZE` contains size of the source, when it was initially read.
-`BLK_SIZE` is the blocksize used. Both are 64-bit big-endian unsigned
-integers. If either size or blocksize differs, then syncer will deny
-using that statefile as a precaution. `HASHx` is BLAKE2b-512 hash
-output, 64 bytes.
--- /dev/null
+ SRC_SIZE || BLK_SIZE || HASH0 || HASH1 || ...
+
+SRC_SIZE contains size of the source, when it was initially read.
+BLK_SIZE is the blocksize used. Both are 64-bit big-endian unsigned
+integers. If either size or blocksize differs, then syncer will deny
+using that statefile as a precaution. HASHx is BLAKE3-256 hash output.
--- /dev/null
+module go.stargrave.org/syncer
+
+go 1.18
+
+require lukechampine.com/blake3 v1.1.7
+
+require github.com/klauspost/cpuid/v2 v2.0.9 // indirect
--- /dev/null
+github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0=
+lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
/*
syncer -- stateful file/device data syncer.
-Copyright (C) 2015-2020 Sergey Matveev <stargrave@stargrave.org>
+Copyright (C) 2015-2022 Sergey Matveev <stargrave@stargrave.org>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
"os"
"runtime"
- "github.com/dchest/blake2b"
+ "lukechampine.com/blake3"
)
+const HashSize = 256 / 8
+
var (
blkSize = flag.Int64("blk", 2*1<<10, "Block size (KiB)")
statePath = flag.String("state", "state.bin", "Path to statefile")
defer dst.Close()
// Check if we already have statefile and read the state
- state := make([]byte, blake2b.Size*blocks)
+ state := make([]byte, HashSize*blocks)
var i int64
var tmp []byte
if _, err := os.Stat(*statePath); err == nil {
sync := make(chan SyncEvent)
syncs <- sync
go func(i int64) {
- sum := blake2b.Sum512(buf[:n])
- sumState := state[i*blake2b.Size : i*blake2b.Size+blake2b.Size]
+ sum := blake3.Sum256(buf[:n])
+ sumState := state[i*HashSize : i*HashSize+HashSize]
if bytes.Compare(sumState, sum[:]) != 0 {
sync <- SyncEvent{i, buf, buf[:n]}
prn.Changed()