Compare commits

9 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 622f6a2638 |  |
|  | e3162b6164 |  |
|  | 9d1e3ab2f0 |  |
|  | dd98356479 |  |
|  | 9307f44601 |  |
|  | b9f69e4aa1 |  |
|  | 7a4e7977c3 |  |
|  | 72fdc255e7 |  |
|  | 63957ff22a |  |
LICENSE | 31

@@ -11,3 +11,34 @@ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+
+=======================================================================
+
+The backoff package (written during my time at Cloudflare) is released
+under the following license:
+
+Copyright (c) 2016 CloudFlare Inc.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
@@ -12,6 +12,7 @@ Contents:
 
 ahash/          Provides hashes from string algorithm specifiers.
 assert/         Error handling, assertion-style.
+backoff/        Implementation of an intelligent backoff strategy.
 cmd/
      atping/         Automated TCP ping, meant for putting in cronjobs.
      certchain/      Display the certificate chain from a
@@ -27,6 +28,7 @@ Contents:
      cruntar/        Untar an archive with hard links, copying instead of
                      linking.
      csrpubdump/     Dump the public key from an X.509 certificate request.
+     data_sync/      Sync the user's homedir to external storage.
      diskimg/        Write a disk image to a device.
      eig/            EEPROM image generator.
      fragment/       Print a fragment of a file.
backoff/LICENSE | 24 (new file)

Copyright (c) 2016 CloudFlare Inc.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.

Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
backoff/README.md | 83 (new file)

# backoff
## Go implementation of "Exponential Backoff And Jitter"

This package implements the backoff strategy described in the AWS
Architecture Blog article
["Exponential Backoff And Jitter"](http://www.awsarchitectureblog.com/2015/03/backoff.html). Essentially,
the backoff has an interval `time.Duration`; the *n<sup>th</sup>* call
to backoff will return a `time.Duration` that is *2<sup>n</sup> *
interval*. If jitter is enabled (which is the default behaviour), the
duration is a random value between 0 and *2<sup>n</sup> * interval*.
The backoff is configured with a maximum duration that will not be
exceeded; e.g., by default, the longest duration returned is
`backoff.DefaultMaxDuration`.

## Usage

A `Backoff` is initialised with a call to `New`. Using zero values
causes it to use `DefaultMaxDuration` and `DefaultInterval` as the
maximum duration and interval.

```
package something

import "github.com/cloudflare/backoff"

func retryable() {
	b := backoff.New(0, 0)
	for {
		err := someOperation()
		if err == nil {
			break
		}

		log.Printf("error in someOperation: %v", err)
		<-time.After(b.Duration())
	}

	log.Printf("succeeded after %d tries", b.Tries()+1)
	b.Reset()
}
```

It can also be used to rate limit code that should retry infinitely, but which does not
use `Backoff` itself.

```
package something

import (
	"time"

	"github.com/cloudflare/backoff"
)

func retryable() {
	b := backoff.New(0, 0)
	b.SetDecay(30 * time.Second)

	for {
		// b will reset if someOperation returns later than
		// the last call to b.Duration() + 30s.
		err := someOperation()
		if err == nil {
			break
		}

		log.Printf("error in someOperation: %v", err)
		<-time.After(b.Duration())
	}
}
```

## Tunables

* `NewWithoutJitter` creates a Backoff that doesn't use jitter.

The default behaviour is controlled by two variables:

* `DefaultInterval` sets the base interval for backoffs created with
  the zero `time.Duration` value in the `Interval` field.
* `DefaultMaxDuration` sets the maximum duration for backoffs created
  with the zero `time.Duration` value in the `MaxDuration` field.
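As a minimal sketch of how those tunables interact with zero-value construction (not part of this changeset; it assumes the `github.com/cloudflare/backoff` import path used in the README above, and the concrete durations are illustrative only):

```go
package main

import (
	"fmt"
	"time"

	"github.com/cloudflare/backoff"
)

func main() {
	// Override the package-level defaults before constructing any Backoff;
	// zero values passed to New/NewWithoutJitter then pick these up instead
	// of the stock 5-minute interval and 6-hour maximum.
	backoff.DefaultInterval = 500 * time.Millisecond
	backoff.DefaultMaxDuration = 30 * time.Second

	// Jitter disabled so the sequence is deterministic: interval * 2^n,
	// capped at the maximum duration.
	b := backoff.NewWithoutJitter(0, 0)
	for i := 0; i < 5; i++ {
		fmt.Println(b.Duration()) // 500ms, 1s, 2s, 4s, 8s
	}
}
```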
backoff/backoff.go | 197 (new file)

// Package backoff contains an implementation of an intelligent backoff
// strategy. It is based on the approach in the AWS architecture blog
// article titled "Exponential Backoff And Jitter", which is found at
// http://www.awsarchitectureblog.com/2015/03/backoff.html.
//
// Essentially, the backoff has an interval `time.Duration`; the nth
// call to backoff will return a `time.Duration` that is 2^n *
// interval. If jitter is enabled (which is the default behaviour),
// the duration is a random value between 0 and 2^n * interval. The
// backoff is configured with a maximum duration that will not be
// exceeded.
//
// The `New` function will attempt to use the system's cryptographic
// random number generator to seed a Go math/rand random number
// source. If this fails, the package will panic on startup.
package backoff

import (
	"crypto/rand"
	"encoding/binary"
	"io"
	"math"
	mrand "math/rand"
	"sync"
	"time"
)

var prngMu sync.Mutex
var prng *mrand.Rand

// DefaultInterval is used when a Backoff is initialised with a
// zero-value Interval.
var DefaultInterval = 5 * time.Minute

// DefaultMaxDuration is the maximum amount of time that the backoff will
// delay for.
var DefaultMaxDuration = 6 * time.Hour

// A Backoff contains the information needed to intelligently backoff
// and retry operations using an exponential backoff algorithm. It should
// be initialised with a call to `New`.
//
// Only use a Backoff from a single goroutine; it is not safe for concurrent
// access.
type Backoff struct {
	// maxDuration is the largest possible duration that can be
	// returned from a call to Duration.
	maxDuration time.Duration

	// interval controls the time step for backing off.
	interval time.Duration

	// noJitter controls whether to use the "Full Jitter"
	// improvement to attempt to smooth out spikes in a high
	// contention scenario. If noJitter is set to true, no
	// jitter will be introduced.
	noJitter bool

	// decay controls the decay of n. If it is non-zero, n is
	// reset if more than the last backoff + decay has elapsed since
	// the last try.
	decay time.Duration

	n       uint64
	lastTry time.Time
}

// New creates a new backoff with the specified max duration and
// interval. Zero values may be used to use the default values.
//
// Panics if either max or interval is negative.
func New(max time.Duration, interval time.Duration) *Backoff {
	if max < 0 || interval < 0 {
		panic("backoff: max or interval is negative")
	}

	b := &Backoff{
		maxDuration: max,
		interval:    interval,
	}
	b.setup()
	return b
}

// NewWithoutJitter works similarly to New, except that the created
// Backoff will not use jitter.
func NewWithoutJitter(max time.Duration, interval time.Duration) *Backoff {
	b := New(max, interval)
	b.noJitter = true
	return b
}

func init() {
	var buf [8]byte
	var n int64

	_, err := io.ReadFull(rand.Reader, buf[:])
	if err != nil {
		panic(err.Error())
	}

	n = int64(binary.LittleEndian.Uint64(buf[:]))

	src := mrand.NewSource(n)
	prng = mrand.New(src)
}

func (b *Backoff) setup() {
	if b.interval == 0 {
		b.interval = DefaultInterval
	}

	if b.maxDuration == 0 {
		b.maxDuration = DefaultMaxDuration
	}
}

// Duration returns a time.Duration appropriate for the backoff,
// incrementing the attempt counter.
func (b *Backoff) Duration() time.Duration {
	b.setup()

	b.decayN()

	t := b.duration(b.n)

	if b.n < math.MaxUint64 {
		b.n++
	}

	if !b.noJitter {
		prngMu.Lock()
		t = time.Duration(prng.Int63n(int64(t)))
		prngMu.Unlock()
	}

	return t
}

// requires b to be locked.
func (b *Backoff) duration(n uint64) (t time.Duration) {
	// Saturate pow
	pow := time.Duration(math.MaxInt64)
	if n < 63 {
		pow = 1 << n
	}

	t = b.interval * pow
	if t/pow != b.interval || t > b.maxDuration {
		t = b.maxDuration
	}

	return
}

// Reset resets the attempt counter of a backoff.
//
// It should be called when the rate-limited action succeeds.
func (b *Backoff) Reset() {
	b.lastTry = time.Time{}
	b.n = 0
}

// SetDecay sets the duration after which the try counter will be reset.
// Panics if decay is smaller than 0.
//
// The decay only kicks in if at least the last backoff + decay has elapsed
// since the last try.
func (b *Backoff) SetDecay(decay time.Duration) {
	if decay < 0 {
		panic("backoff: decay < 0")
	}

	b.decay = decay
}

// requires b to be locked
func (b *Backoff) decayN() {
	if b.decay == 0 {
		return
	}

	if b.lastTry.IsZero() {
		b.lastTry = time.Now()
		return
	}

	lastDuration := b.duration(b.n - 1)
	decayed := time.Since(b.lastTry) > lastDuration+b.decay
	b.lastTry = time.Now()

	if !decayed {
		return
	}

	b.n = 0
}
backoff/backoff_test.go | 175 (new file)

package backoff

import (
	"fmt"
	"math"
	"testing"
	"time"
)

// If given New with 0's and no jitter, ensure that certain invariants are met:
//
//   - the default max duration and interval should be used
//   - noJitter should be true
//   - the RNG should not be initialised
//   - the first duration should be equal to the default interval
func TestDefaults(t *testing.T) {
	b := NewWithoutJitter(0, 0)

	if b.maxDuration != DefaultMaxDuration {
		t.Fatalf("expected new backoff to use the default max duration (%s), but have %s", DefaultMaxDuration, b.maxDuration)
	}

	if b.interval != DefaultInterval {
		t.Fatalf("expected new backoff to use the default interval (%s), but have %s", DefaultInterval, b.interval)
	}

	if b.noJitter != true {
		t.Fatal("backoff should have been initialised without jitter")
	}

	dur := b.Duration()
	if dur != DefaultInterval {
		t.Fatalf("expected first duration to be %s, have %s", DefaultInterval, dur)
	}
}

// Given a zero-value initialised Backoff, it should be transparently
// set up.
func TestSetup(t *testing.T) {
	b := new(Backoff)
	dur := b.Duration()
	if dur < 0 || dur > (5*time.Minute) {
		t.Fatalf("want duration between 0 and 5 minutes, have %s", dur)
	}
}

// Ensure that tries increments as expected.
func TestTries(t *testing.T) {
	b := NewWithoutJitter(5, 1)

	for i := uint64(0); i < 3; i++ {
		if b.n != i {
			t.Fatalf("want tries=%d, have tries=%d", i, b.n)
		}

		pow := 1 << i
		expected := time.Duration(pow)
		dur := b.Duration()
		if dur != expected {
			t.Fatalf("want duration=%d, have duration=%d at i=%d", expected, dur, i)
		}
	}

	for i := uint(3); i < 5; i++ {
		dur := b.Duration()
		if dur != 5 {
			t.Fatalf("want duration=5, have %d at i=%d", dur, i)
		}
	}
}

// Ensure that a call to Reset will actually reset the Backoff.
func TestReset(t *testing.T) {
	const iter = 10
	b := New(1000, 1)
	for i := 0; i < iter; i++ {
		_ = b.Duration()
	}

	if b.n != iter {
		t.Fatalf("expected tries=%d, have tries=%d", iter, b.n)
	}

	b.Reset()
	if b.n != 0 {
		t.Fatalf("expected tries=0 after reset, have tries=%d", b.n)
	}
}

const decay = 5 * time.Millisecond
const max = 10 * time.Millisecond
const interval = time.Millisecond

func TestDecay(t *testing.T) {
	const iter = 10

	b := NewWithoutJitter(max, 1)
	b.SetDecay(decay)

	var backoff time.Duration
	for i := 0; i < iter; i++ {
		backoff = b.Duration()
	}

	if b.n != iter {
		t.Fatalf("expected tries=%d, have tries=%d", iter, b.n)
	}

	// Don't decay below backoff
	b.lastTry = time.Now().Add(-backoff + 1)
	backoff = b.Duration()
	if b.n != iter+1 {
		t.Fatalf("expected tries=%d, have tries=%d", iter+1, b.n)
	}

	// Reset after backoff + decay
	b.lastTry = time.Now().Add(-backoff - decay)
	b.Duration()
	if b.n != 1 {
		t.Fatalf("expected tries=%d, have tries=%d", 1, b.n)
	}
}

// Ensure that decay works even if the retry counter is saturated.
func TestDecaySaturation(t *testing.T) {
	b := NewWithoutJitter(1<<2, 1)
	b.SetDecay(decay)

	var duration time.Duration
	for i := 0; i <= 2; i++ {
		duration = b.Duration()
	}

	if duration != 1<<2 {
		t.Fatalf("expected duration=%v, have duration=%v", 1<<2, duration)
	}

	b.lastTry = time.Now().Add(-duration - decay)
	b.n = math.MaxUint64

	duration = b.Duration()
	if duration != 1 {
		t.Errorf("expected duration=%v, have duration=%v", 1, duration)
	}
}

func ExampleBackoff_SetDecay() {
	b := NewWithoutJitter(max, interval)
	b.SetDecay(decay)

	// try 0
	fmt.Println(b.Duration())

	// try 1
	fmt.Println(b.Duration())

	// try 2
	duration := b.Duration()
	fmt.Println(duration)

	// try 3, below decay
	time.Sleep(duration)
	duration = b.Duration()
	fmt.Println(duration)

	// try 4, resets
	time.Sleep(duration + decay)
	fmt.Println(b.Duration())

	// Output: 1ms
	// 2ms
	// 4ms
	// 8ms
	// 1ms
}
cmd/data_sync/README | 32 (new file)

data_sync

This is a tool I wrote primarily to sync my home directory to a backup
drive plugged into my laptop. This system is provisioned by Ansible,
and the goal is to be able to just copy my home directory back in the
event of a failure without having lost a great deal of work or having
to wait for Ansible to finish installing the right backup software.
Specifically, I use a Framework laptop with the 1TB storage module,
encrypted with LUKS, and run this twice daily (timed to correspond with
my commute, though that's not really necessary). It started off as a
shell script, then I decided to just write it as a program.

Usage: data_sync [-d path] [-l level] [-m path] [-nqsv]
                 [-t path]
	-d path		path to sync source directory
			(default "~")
	-l level	log level to output (default "INFO"). Valid log
			levels are DEBUG, INFO, NOTICE, WARNING, ERR,
			CRIT, ALERT, EMERG. The default is INFO.
	-m path		path to sync mount directory
			(default "/media/$USER/$(hostname -s)_data")
	-n		dry-run mode: only check paths and print files to
			exclude
	-q		suppress console output
	-s		suppress syslog output
	-t path		path to sync target directory
			(default "/media/$USER/$(hostname -s)_data/$USER")
	-v		verbose rsync output

data_sync rsyncs the tree at the sync source directory (-d) to the sync
target directory (-t); it checks the mount directory (-m) exists; the
sync target directory must exist on the mount directory.
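To make those defaults concrete (a hypothetical example, not taken from the changeset): on a host whose short name is `framework` with `USER=kyle`, a bare `data_sync` invocation is equivalent to `data_sync -d ~ -m /media/kyle/framework_data -t /media/kyle/framework_data/kyle`, i.e. the home directory is rsynced onto the backup volume mounted at `/media/kyle/framework_data`.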
cmd/data_sync/main.go | 230 (new file)

package main

import (
	"flag"
	"fmt"
	"io"
	"io/fs"
	"os"
	"os/exec"
	"path/filepath"
	"strings"

	"git.wntrmute.dev/kyle/goutils/config"
	"git.wntrmute.dev/kyle/goutils/fileutil"
	"git.wntrmute.dev/kyle/goutils/log"
)

func mustHostname() string {
	hostname, err := os.Hostname()
	log.FatalError(err, "couldn't retrieve hostname")

	if hostname == "" {
		log.Fatal("no hostname returned")
	}
	return strings.Split(hostname, ".")[0]
}

var (
	defaultDataDir   = mustHostname() + "_data"
	defaultProgName  = defaultDataDir + "_sync"
	defaultMountDir  = filepath.Join("/media", os.Getenv("USER"), defaultDataDir)
	defaultSyncDir   = os.Getenv("HOME")
	defaultTargetDir = filepath.Join(defaultMountDir, os.Getenv("USER"))
)

func usage(w io.Writer) {
	prog := filepath.Base(os.Args[0])
	fmt.Fprintf(w, `Usage: %s [-d path] [-l level] [-m path] [-nqsv]
                 [-t path]
	-d path		path to sync source directory
			(default "%s")
	-l level	log level to output (default "INFO"). Valid log
			levels are DEBUG, INFO, NOTICE, WARNING, ERR,
			CRIT, ALERT, EMERG. The default is INFO.
	-m path		path to sync mount directory
			(default "%s")
	-n		dry-run mode: only check paths and print files to
			exclude
	-q		suppress console output
	-s		suppress syslog output
	-t path		path to sync target directory
			(default "%s")
	-v		verbose rsync output

%s rsyncs the tree at the sync source directory (-d) to the sync target
directory (-t); it checks the mount directory (-m) exists; the sync
target directory must exist on the mount directory.

`, prog, defaultSyncDir, defaultMountDir, defaultTargetDir, prog)
}

func checkPaths(mount, target string, dryRun bool) error {
	if !fileutil.DirectoryDoesExist(mount) {
		return fmt.Errorf("sync dir %s isn't mounted", mount)
	}

	if !strings.HasPrefix(target, mount) {
		return fmt.Errorf("target dir %s must exist in %s", target, mount)
	}

	if !fileutil.DirectoryDoesExist(target) {
		if dryRun {
			log.Infof("would mkdir %s", target)
		} else {
			log.Infof("mkdir %s", target)
			if err := os.Mkdir(target, 0755); err != nil {
				return err
			}
		}
	}

	return nil
}

func buildExcludes(syncDir string) ([]string, error) {
	var excluded []string

	walker := func(path string, info fs.FileInfo, err error) error {
		if err != nil {
			excluded = append(excluded, strings.TrimPrefix(path, syncDir))
			if info != nil && info.IsDir() {
				return filepath.SkipDir
			}

			return nil
		}

		if info.Mode().IsRegular() {
			if err = fileutil.Access(path, fileutil.AccessRead); err != nil {
				excluded = append(excluded, strings.TrimPrefix(path, syncDir))
			}
		}

		if info.IsDir() {
			if err = fileutil.Access(path, fileutil.AccessExec); err != nil {
				excluded = append(excluded, strings.TrimPrefix(path, syncDir))
			}
		}

		return nil
	}

	err := filepath.Walk(syncDir, walker)
	return excluded, err
}

func writeExcludes(excluded []string) (string, error) {
	if len(excluded) == 0 {
		return "", nil
	}

	excludeFile, err := os.CreateTemp("", defaultProgName)
	if err != nil {
		return "", err
	}

	for _, name := range excluded {
		fmt.Fprintln(excludeFile, name)
	}

	defer excludeFile.Close()
	return excludeFile.Name(), nil
}

func rsync(syncDir, target, excludeFile string, verboseRsync bool) error {
	var args []string

	if excludeFile != "" {
		args = append(args, "--exclude-from")
		args = append(args, excludeFile)
	}

	if verboseRsync {
		args = append(args, "--progress")
		args = append(args, "-v")
	}

	args = append(args, []string{"-au", syncDir + "/", target + "/"}...)

	path, err := exec.LookPath("rsync")
	if err != nil {
		return err
	}

	cmd := exec.Command(path, args...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

func init() {
	flag.Usage = func() { usage(os.Stderr) }
}

func main() {
	var logLevel, mountDir, syncDir, target string
	var dryRun, quietMode, noSyslog, verboseRsync bool

	flag.StringVar(&syncDir, "d", config.GetDefault("sync_dir", defaultSyncDir),
		"`path to sync source directory`")
	flag.StringVar(&logLevel, "l", config.GetDefault("log_level", "INFO"),
		"log level to output")
	flag.StringVar(&mountDir, "m", config.GetDefault("mount_dir", defaultMountDir),
		"`path` to sync mount directory")
	flag.BoolVar(&dryRun, "n", false, "dry-run mode: only check paths and print files to exclude")
	flag.BoolVar(&quietMode, "q", quietMode, "suppress console output")
	flag.BoolVar(&noSyslog, "s", noSyslog, "suppress syslog output")
	flag.StringVar(&target, "t", config.GetDefault("sync_target", defaultTargetDir),
		"`path` to sync target directory")
	flag.BoolVar(&verboseRsync, "v", false, "verbose rsync output")
	flag.Parse()

	if quietMode && noSyslog {
		fmt.Fprintln(os.Stderr, "both console and syslog output are suppressed")
		fmt.Fprintln(os.Stderr, "errors will NOT be reported")
	}

	logOpts := &log.Options{
		Level:        logLevel,
		Tag:          defaultProgName,
		Facility:     "user",
		WriteSyslog:  !noSyslog,
		WriteConsole: !quietMode,
	}
	err := log.Setup(logOpts)
	log.FatalError(err, "failed to set up logging")

	log.Infof("checking paths: mount=%s, target=%s", mountDir, target)
	err = checkPaths(mountDir, target, dryRun)
	log.FatalError(err, "target dir isn't ready")

	log.Infof("checking for files to exclude from %s", syncDir)
	excluded, err := buildExcludes(syncDir)
	log.FatalError(err, "couldn't build excludes")

	if dryRun {
		fmt.Println("excluded files:")
		for _, path := range excluded {
			fmt.Printf("\t%s\n", path)
		}
		return
	}

	excludeFile, err := writeExcludes(excluded)
	log.FatalError(err, "couldn't write exclude file")
	log.Infof("excluding %d files via %s", len(excluded), excludeFile)

	if excludeFile != "" {
		defer func() {
			log.Infof("removing exclude file %s", excludeFile)
			if err := os.Remove(excludeFile); err != nil {
				log.Warningf("failed to remove temp file %s", excludeFile)
			}
		}()
	}

	err = rsync(syncDir, target, excludeFile, verboseRsync)
	log.FatalError(err, "couldn't sync data")
}
@@ -13,7 +13,7 @@ go_test(
     srcs = ["dbg_test.go"],
     embed = [":dbg"],
     deps = [
+        "//assert",
         "//testio",
-        "@com_github_stretchr_testify//require",
     ],
 )
@@ -1,12 +1,13 @@
 package dbg
 
 import (
+	"fmt"
 	"io/ioutil"
 	"os"
 	"testing"
 
+	"git.wntrmute.dev/kyle/goutils/assert"
 	"git.wntrmute.dev/kyle/goutils/testio"
-	"github.com/stretchr/testify/require"
 )
 
 func TestNew(t *testing.T) {
@@ -17,16 +18,16 @@ func TestNew(t *testing.T) {
 	dbg.Print("hello")
 	dbg.Println("hello")
 	dbg.Printf("hello %s", "world")
-	require.Equal(t, 0, buf.Len())
+	assert.BoolT(t, buf.Len() == 0)
 
 	dbg.Enabled = true
 	dbg.Print("hello")              // +5
 	dbg.Println("hello")            // +6
 	dbg.Printf("hello %s", "world") // +11
-	require.Equal(t, 22, buf.Len())
+	assert.BoolT(t, buf.Len() == 22, fmt.Sprintf("buffer should be length 22 but is length %d", buf.Len()))
 
 	err := dbg.Close()
-	require.NoError(t, err)
+	assert.NoErrorT(t, err)
 }
 
 func TestTo(t *testing.T) {
@@ -36,39 +37,38 @@ func TestTo(t *testing.T) {
 	dbg.Print("hello")
 	dbg.Println("hello")
 	dbg.Printf("hello %s", "world")
-	require.Equal(t, 0, buf.Len())
+	assert.BoolT(t, buf.Len() == 0, "debug output should be suppressed")
 
 	dbg.Enabled = true
 	dbg.Print("hello")              // +5
 	dbg.Println("hello")            // +6
 	dbg.Printf("hello %s", "world") // +11
-	require.Equal(t, 22, buf.Len())
+	assert.BoolT(t, buf.Len() == 22, "didn't get the expected debug output")
 
 	err := dbg.Close()
-	require.NoError(t, err)
+	assert.NoErrorT(t, err)
 }
 
 func TestToFile(t *testing.T) {
 	testFile, err := ioutil.TempFile("", "dbg")
-	require.NoError(t, err)
+	assert.NoErrorT(t, err)
 	err = testFile.Close()
-	require.NoError(t, err)
+	assert.NoErrorT(t, err)
 
 	testFileName := testFile.Name()
 	defer os.Remove(testFileName)
 
 	dbg, err := ToFile(testFileName)
-	require.NoError(t, err)
+	assert.NoErrorT(t, err)
 
 	dbg.Print("hello")
 	dbg.Println("hello")
 	dbg.Printf("hello %s", "world")
 
 	stat, err := os.Stat(testFileName)
-	require.NoError(t, err)
+	assert.NoErrorT(t, err)
 
-	require.EqualValues(t, 0, stat.Size())
+	assert.BoolT(t, stat.Size() == 0, "no debug output should have been sent to the log file")
 
 	dbg.Enabled = true
 	dbg.Print("hello") // +5
@@ -76,12 +76,12 @@ func TestToFile(t *testing.T) {
 	dbg.Printf("hello %s", "world") // +11
 
 	stat, err = os.Stat(testFileName)
-	require.NoError(t, err)
+	assert.NoErrorT(t, err)
 
-	require.EqualValues(t, 22, stat.Size())
+	assert.BoolT(t, stat.Size() == 22, fmt.Sprintf("have %d bytes in the log file, expected 22", stat.Size()))
 
 	err = dbg.Close()
-	require.NoError(t, err)
+	assert.NoErrorT(t, err)
 }
 
 func TestWriting(t *testing.T) {
@@ -90,31 +90,31 @@ func TestWriting(t *testing.T) {
 	dbg := To(buf)
 
 	n, err := dbg.Write(data)
-	require.NoError(t, err)
-	require.EqualValues(t, 0, n)
+	assert.NoErrorT(t, err)
+	assert.BoolT(t, n == 0, "expected nothing to be written to the buffer")
 
 	dbg.Enabled = true
 	n, err = dbg.Write(data)
-	require.NoError(t, err)
-	require.EqualValues(t, 12, n)
+	assert.NoErrorT(t, err)
+	assert.BoolT(t, n == 12, fmt.Sprintf("wrote %d bytes in the buffer, expected to write 12", n))
 
 	err = dbg.Close()
-	require.NoError(t, err)
+	assert.NoErrorT(t, err)
 }
 
 func TestToFileError(t *testing.T) {
 	testFile, err := ioutil.TempFile("", "dbg")
-	require.NoError(t, err)
+	assert.NoErrorT(t, err)
 	err = testFile.Chmod(0400)
-	require.NoError(t, err)
+	assert.NoErrorT(t, err)
 	err = testFile.Close()
-	require.NoError(t, err)
+	assert.NoErrorT(t, err)
 
 	testFileName := testFile.Name()
 
 	_, err = ToFile(testFileName)
-	require.Error(t, err)
+	assert.ErrorT(t, err)
 
 	err = os.Remove(testFileName)
-	require.NoError(t, err)
+	assert.NoErrorT(t, err)
 }
deps.bzl | 10

@@ -221,8 +221,8 @@ def go_dependencies():
     go_repository(
         name = "com_github_stretchr_objx",
         importpath = "github.com/stretchr/objx",
-        sum = "h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=",
-        version = "v0.1.1",
+        sum = "h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=",
+        version = "v0.1.0",
     )
     go_repository(
         name = "com_github_stretchr_testify",
@@ -302,12 +302,6 @@ def go_dependencies():
         sum = "h1:bkb2NMGo3/Du52wvYj9Whth5KZfMV6d3O0Vbr3nz/UE=",
         version = "v0.0.0-20150115234039-8488cc47d90c",
     )
-    go_repository(
-        name = "org_golang_google_appengine",
-        importpath = "google.golang.org/appengine",
-        sum = "h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc=",
-        version = "v1.6.6",
-    )
     go_repository(
         name = "org_golang_x_crypto",
         importpath = "golang.org/x/crypto",
go.mod | 3

@@ -7,7 +7,6 @@ require (
 	github.com/kr/text v0.2.0
 	github.com/pkg/errors v0.9.1
 	github.com/pkg/sftp v1.12.0
-	github.com/stretchr/testify v1.6.1
 	golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b
 	golang.org/x/sys v0.0.0-20220412211240-33da011f77ad
 	gopkg.in/yaml.v2 v2.4.0
@@ -21,7 +20,5 @@ require (
 require (
 	github.com/kr/fs v0.1.0 // indirect
 	github.com/kr/pretty v0.1.0 // indirect
-	github.com/pmezard/go-difflib v1.0.0 // indirect
 	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
-	gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect
 )
@@ -14,6 +14,7 @@ import (
 type logger struct {
 	l gsyslog.Syslogger
 	p gsyslog.Priority
+	writeConsole bool
 }
 
 func (log *logger) printf(p gsyslog.Priority, format string, args ...interface{}) {
@@ -21,7 +22,7 @@ func (log *logger) printf(p gsyslog.Priority, format string, args ...interface{}) {
 		format += "\n"
 	}
 
-	if p <= log.p {
+	if p <= log.p && log.writeConsole {
 		fmt.Printf("%s [%s] ", prioritiev[p], timestamp())
 		fmt.Printf(format, args...)
 	}
@@ -32,7 +33,7 @@ func (log *logger) printf(p gsyslog.Priority, format string, args ...interface{}) {
 }
 
 func (log *logger) print(p gsyslog.Priority, args ...interface{}) {
-	if p <= log.p {
+	if p <= log.p && log.writeConsole {
 		fmt.Printf("%s [%s] ", prioritiev[p], timestamp())
 		fmt.Print(args...)
 	}
@@ -43,7 +44,7 @@ func (log *logger) print(p gsyslog.Priority, args ...interface{}) {
 }
 
 func (log *logger) println(p gsyslog.Priority, args ...interface{}) {
-	if p <= log.p {
+	if p <= log.p && log.writeConsole {
 		fmt.Printf("%s [%s] ", prioritiev[p], timestamp())
 		fmt.Println(args...)
 	}
@@ -102,6 +103,7 @@ type Options struct {
 	Tag          string
 	Facility     string
 	WriteSyslog  bool
+	WriteConsole bool
 }
 
 // DefaultOptions returns a sane set of defaults for syslog, using the program
@@ -117,6 +119,7 @@ func DefaultOptions(tag string, withSyslog bool) *Options {
 		Tag:          tag,
 		Facility:     "daemon",
 		WriteSyslog:  withSyslog,
+		WriteConsole: true,
 	}
 }
 
@@ -130,8 +133,10 @@ func DefaultDebugOptions(tag string, withSyslog bool) *Options {
 
 	return &Options{
 		Level:        "DEBUG",
+		Tag:          tag,
 		Facility:     "daemon",
 		WriteSyslog:  withSyslog,
+		WriteConsole: true,
 	}
 }
 
@@ -142,6 +147,7 @@ func Setup(opts *Options) error {
 	}
 
 	log.p = priority
+	log.writeConsole = opts.WriteConsole
 
 	if opts.WriteSyslog {
 		var err error
@@ -261,6 +267,17 @@ func Fatalf(format string, args ...interface{}) {
 	os.Exit(1)
 }
 
+// FatalError will only execute if err != nil. If it does,
+// it will print the message (append the error) and exit
+// the program.
+func FatalError(err error, message string) {
+	if err == nil {
+		return
+	}
+
+	Fatal(fmt.Sprintf("%s: %s", message, err))
+}
+
 // Spew will pretty print the args if the logger is set to DEBUG priority.
 func Spew(args ...interface{}) {
 	log.spew(args...)
@@ -55,3 +55,8 @@ func (b *Buffer) Close() error {
 func (b *Buffer) Len() int {
 	return len(b.data[b.pos:])
 }
+
+// Bytes returns the underlying bytes from the current position.
+func (b *Buffer) Bytes() []byte {
+	return b.data[b.pos:]
+}