Use mcdsl/terminal for all password prompts

Replace direct golang.org/x/term calls with mcdsl/terminal.ReadPassword
across mciasctl (6 sites), mciasgrpcctl (1 site), and mciasdb (1 site).
Aligns with the new CLI security standard in engineering-standards.md.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
2026-03-28 11:40:11 -07:00
parent e4220b840e
commit 5b5e1a7ed6
142 changed files with 10241 additions and 7788 deletions

View File

@@ -32,12 +32,17 @@ type BufferPool interface {
Get(length int) *[]byte
// Put returns a buffer to the pool.
//
// The provided pointer must hold a prefix of the buffer obtained via
// BufferPool.Get to ensure the buffer's entire capacity can be re-used.
Put(*[]byte)
}
const goPageSize = 4 << 10 // 4KiB. N.B. this must be a power of 2.
var defaultBufferPoolSizes = []int{
256,
4 << 10, // 4KB (go page size)
goPageSize,
16 << 10, // 16KB (max HTTP/2 frame size used by gRPC)
32 << 10, // 32KB (default buffer size for io.Copy)
1 << 20, // 1MB
@@ -48,7 +53,7 @@ var defaultBufferPool BufferPool
func init() {
defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...)
internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) {
internal.SetDefaultBufferPool = func(pool BufferPool) {
defaultBufferPool = pool
}
@@ -118,7 +123,11 @@ type sizedBufferPool struct {
}
func (p *sizedBufferPool) Get(size int) *[]byte {
buf := p.pool.Get().(*[]byte)
buf, ok := p.pool.Get().(*[]byte)
if !ok {
buf := make([]byte, size, p.defaultSize)
return &buf
}
b := *buf
clear(b[:cap(b)])
*buf = b[:size]
@@ -137,12 +146,6 @@ func (p *sizedBufferPool) Put(buf *[]byte) {
func newSizedBufferPool(size int) *sizedBufferPool {
return &sizedBufferPool{
pool: sync.Pool{
New: func() any {
buf := make([]byte, size)
return &buf
},
},
defaultSize: size,
}
}
@@ -160,6 +163,7 @@ type simpleBufferPool struct {
func (p *simpleBufferPool) Get(size int) *[]byte {
bs, ok := p.pool.Get().(*[]byte)
if ok && cap(*bs) >= size {
clear((*bs)[:cap(*bs)])
*bs = (*bs)[:size]
return bs
}
@@ -170,7 +174,14 @@ func (p *simpleBufferPool) Get(size int) *[]byte {
p.pool.Put(bs)
}
b := make([]byte, size)
// If we're going to allocate, round up to the nearest page. This way if
// requests frequently arrive with small variation we don't allocate
// repeatedly if we get unlucky and they increase over time. By default we
// only allocate here if size > 1MiB. Because goPageSize is a power of 2, we
// can round up efficiently.
allocSize := (size + goPageSize - 1) & ^(goPageSize - 1)
b := make([]byte, size, allocSize)
return &b
}

View File

@@ -19,6 +19,7 @@
package mem
import (
"fmt"
"io"
)
@@ -117,43 +118,36 @@ func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer {
// Reader returns a new Reader for the input slice after taking references to
// each underlying buffer.
func (s BufferSlice) Reader() Reader {
func (s BufferSlice) Reader() *Reader {
s.Ref()
return &sliceReader{
return &Reader{
data: s,
len: s.Len(),
}
}
// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface
// with other parts systems. It also provides an additional convenience method
// Remaining(), which returns the number of unread bytes remaining in the slice.
// with other systems.
//
// Buffers will be freed as they are read.
type Reader interface {
io.Reader
io.ByteReader
// Close frees the underlying BufferSlice and never returns an error. Subsequent
// calls to Read will return (0, io.EOF).
Close() error
// Remaining returns the number of unread bytes remaining in the slice.
Remaining() int
// Reset frees the currently held buffer slice and starts reading from the
// provided slice. This allows reusing the reader object.
Reset(s BufferSlice)
}
type sliceReader struct {
//
// A Reader can be constructed from a BufferSlice; alternatively the zero value
// of a Reader may be used after calling Reset on it.
type Reader struct {
data BufferSlice
len int
// The index into data[0].ReadOnlyData().
bufferIdx int
}
func (r *sliceReader) Remaining() int {
// Remaining returns the number of unread bytes remaining in the slice.
func (r *Reader) Remaining() int {
	// r.len is maintained incrementally by Read/ReadByte/Discard/Reset,
	// so no traversal of the underlying buffers is needed here.
	return r.len
}
func (r *sliceReader) Reset(s BufferSlice) {
// Reset frees the currently held buffer slice and starts reading from the
// provided slice. This allows reusing the reader object.
func (r *Reader) Reset(s BufferSlice) {
r.data.Free()
s.Ref()
r.data = s
@@ -161,14 +155,16 @@ func (r *sliceReader) Reset(s BufferSlice) {
r.bufferIdx = 0
}
func (r *sliceReader) Close() error {
// Close frees the underlying BufferSlice and never returns an error. Subsequent
// calls to Read will return (0, io.EOF).
func (r *Reader) Close() error {
	// Release the references taken on every remaining buffer, then reset
	// state so future reads observe an empty (EOF) reader. The error return
	// exists only to satisfy io.Closer.
	r.data.Free()
	r.data = nil
	r.len = 0
	return nil
}
func (r *sliceReader) freeFirstBufferIfEmpty() bool {
func (r *Reader) freeFirstBufferIfEmpty() bool {
if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) {
return false
}
@@ -179,7 +175,7 @@ func (r *sliceReader) freeFirstBufferIfEmpty() bool {
return true
}
func (r *sliceReader) Read(buf []byte) (n int, _ error) {
func (r *Reader) Read(buf []byte) (n int, _ error) {
if r.len == 0 {
return 0, io.EOF
}
@@ -202,7 +198,8 @@ func (r *sliceReader) Read(buf []byte) (n int, _ error) {
return n, nil
}
func (r *sliceReader) ReadByte() (byte, error) {
// ReadByte reads a single byte.
func (r *Reader) ReadByte() (byte, error) {
if r.len == 0 {
return 0, io.EOF
}
@@ -290,3 +287,59 @@ nextBuffer:
}
}
}
// Discard skips the next n bytes, returning the number of bytes discarded.
//
// It frees buffers as they are fully consumed.
//
// If Discard skips fewer than n bytes, it also returns an error.
func (r *Reader) Discard(n int) (discarded int, err error) {
	total := n
	for n > 0 && r.len > 0 {
		// Consume as much as possible from the front buffer, starting at
		// the current read offset (r.bufferIdx).
		curData := r.data[0].ReadOnlyData()
		curSize := min(n, len(curData)-r.bufferIdx)
		n -= curSize
		r.len -= curSize
		r.bufferIdx += curSize
		if r.bufferIdx >= len(curData) {
			// Front buffer fully consumed: drop our reference and advance.
			// This branch also fires for zero-length buffers (curSize == 0),
			// so the loop always makes progress.
			r.data[0].Free()
			r.data = r.data[1:]
			r.bufferIdx = 0
		}
	}
	discarded = total - n
	if n > 0 {
		// The reader was exhausted before n bytes could be skipped; report
		// the partial count alongside the error.
		return discarded, fmt.Errorf("insufficient bytes in reader")
	}
	return discarded, nil
}
// Peek returns the next n bytes without advancing the reader.
//
// Peek appends results to the provided res slice and returns the updated slice.
// This pattern allows re-using the storage of res if it has sufficient
// capacity.
//
// The returned subslices are views into the underlying buffers and are only
// valid until the reader is advanced past the corresponding buffer.
//
// If Peek returns fewer than n bytes, it also returns an error.
func (r *Reader) Peek(n int, res [][]byte) ([][]byte, error) {
	for i := 0; n > 0 && i < len(r.data); i++ {
		curData := r.data[i].ReadOnlyData()
		// Only the front buffer has already been partially consumed; all
		// later buffers are viewed from their start.
		start := 0
		if i == 0 {
			start = r.bufferIdx
		}
		curSize := min(n, len(curData)-start)
		if curSize == 0 {
			// Skip empty (or fully consumed front) buffers without
			// appending zero-length views.
			continue
		}
		res = append(res, curData[start:start+curSize])
		n -= curSize
	}
	if n > 0 {
		// Not enough unread bytes to satisfy the request. Note: res is
		// discarded and nil is returned on this path, not the partial views.
		return nil, fmt.Errorf("insufficient bytes in reader")
	}
	return res, nil
}

View File

@@ -62,7 +62,6 @@ var (
bufferPoolingThreshold = 1 << 10
bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }}
refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }}
)
// IsBelowBufferPoolingThreshold returns true if the given size is less than or
@@ -73,9 +72,19 @@ func IsBelowBufferPoolingThreshold(size int) bool {
}
type buffer struct {
refs atomic.Int32
data []byte
// rootBuf is the buffer responsible for returning origData to the pool
// once the reference count drops to 0.
//
// When a buffer is split, the new buffer inherits the rootBuf of the
// original and increments the root's reference count. For the
// initial buffer (the root), this field points to itself.
rootBuf *buffer
// The following fields are only set for root buffers.
origData *[]byte
data []byte
refs *atomic.Int32
pool BufferPool
}
@@ -103,8 +112,8 @@ func NewBuffer(data *[]byte, pool BufferPool) Buffer {
b.origData = data
b.data = *data
b.pool = pool
b.refs = refObjectPool.Get().(*atomic.Int32)
b.refs.Add(1)
b.rootBuf = b
b.refs.Store(1)
return b
}
@@ -127,42 +136,44 @@ func Copy(data []byte, pool BufferPool) Buffer {
}
func (b *buffer) ReadOnlyData() []byte {
if b.refs == nil {
if b.rootBuf == nil {
panic("Cannot read freed buffer")
}
return b.data
}
func (b *buffer) Ref() {
if b.refs == nil {
if b.refs.Add(1) <= 1 {
panic("Cannot ref freed buffer")
}
b.refs.Add(1)
}
func (b *buffer) Free() {
if b.refs == nil {
refs := b.refs.Add(-1)
if refs < 0 {
panic("Cannot free freed buffer")
}
refs := b.refs.Add(-1)
switch {
case refs > 0:
if refs > 0 {
return
case refs == 0:
}
b.data = nil
if b.rootBuf == b {
// This buffer is the owner of the data slice and its ref count reached
// 0, free the slice.
if b.pool != nil {
b.pool.Put(b.origData)
b.pool = nil
}
refObjectPool.Put(b.refs)
b.origData = nil
b.data = nil
b.refs = nil
b.pool = nil
bufferObjectPool.Put(b)
default:
panic("Cannot free freed buffer")
} else {
// This buffer doesn't own the data slice, decrement a ref on the root
// buffer.
b.rootBuf.Free()
}
b.rootBuf = nil
bufferObjectPool.Put(b)
}
func (b *buffer) Len() int {
@@ -170,16 +181,14 @@ func (b *buffer) Len() int {
}
func (b *buffer) split(n int) (Buffer, Buffer) {
if b.refs == nil {
if b.rootBuf == nil || b.rootBuf.refs.Add(1) <= 1 {
panic("Cannot split freed buffer")
}
b.refs.Add(1)
split := newBuffer()
split.origData = b.origData
split.data = b.data[n:]
split.refs = b.refs
split.pool = b.pool
split.rootBuf = b.rootBuf
split.refs.Store(1)
b.data = b.data[:n]
@@ -187,7 +196,7 @@ func (b *buffer) split(n int) (Buffer, Buffer) {
}
func (b *buffer) read(buf []byte) (int, Buffer) {
if b.refs == nil {
if b.rootBuf == nil {
panic("Cannot read freed buffer")
}