Add SSO login support
- Add [sso] config section with redirect_uri - Create mcdsl/sso client when SSO is configured - Add /login (landing page), /sso/redirect, /sso/callback routes - Add /logout route - Update login template with SSO landing page variant - Bump mcdsl to v1.6.0 Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
2
go.mod
2
go.mod
@@ -4,7 +4,7 @@ go 1.25.7
|
|||||||
|
|
||||||
require (
|
require (
|
||||||
git.wntrmute.dev/kyle/goutils v1.21.0
|
git.wntrmute.dev/kyle/goutils v1.21.0
|
||||||
git.wntrmute.dev/mc/mcdsl v1.4.0
|
git.wntrmute.dev/mc/mcdsl v1.6.0
|
||||||
github.com/go-chi/chi/v5 v5.2.5
|
github.com/go-chi/chi/v5 v5.2.5
|
||||||
github.com/spf13/cobra v1.10.2
|
github.com/spf13/cobra v1.10.2
|
||||||
github.com/spf13/viper v1.21.0
|
github.com/spf13/viper v1.21.0
|
||||||
|
|||||||
2
go.sum
2
go.sum
@@ -2,6 +2,8 @@ git.wntrmute.dev/kyle/goutils v1.21.0 h1:ZR7ovV400hsF09zc8tkdHs6vyen8TDJ7flong/d
|
|||||||
git.wntrmute.dev/kyle/goutils v1.21.0/go.mod h1:JQ8NL5lHSEYl719UMf20p4G1ei70RVGma0hjjNXCR2c=
|
git.wntrmute.dev/kyle/goutils v1.21.0/go.mod h1:JQ8NL5lHSEYl719UMf20p4G1ei70RVGma0hjjNXCR2c=
|
||||||
git.wntrmute.dev/mc/mcdsl v1.4.0 h1:PsEIyskcjBduwHSRwNB/U/uSeU/cv3C8MVr0SRjBRLg=
|
git.wntrmute.dev/mc/mcdsl v1.4.0 h1:PsEIyskcjBduwHSRwNB/U/uSeU/cv3C8MVr0SRjBRLg=
|
||||||
git.wntrmute.dev/mc/mcdsl v1.4.0/go.mod h1:MhYahIu7Sg53lE2zpQ20nlrsoNRjQzOJBAlCmom2wJc=
|
git.wntrmute.dev/mc/mcdsl v1.4.0/go.mod h1:MhYahIu7Sg53lE2zpQ20nlrsoNRjQzOJBAlCmom2wJc=
|
||||||
|
git.wntrmute.dev/mc/mcdsl v1.6.0 h1:Vn1uy6b1yZ4Y8fsl1+kLucVprrFKlQ4SN2cjUH/Eg2k=
|
||||||
|
git.wntrmute.dev/mc/mcdsl v1.6.0/go.mod h1:MhYahIu7Sg53lE2zpQ20nlrsoNRjQzOJBAlCmom2wJc=
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||||
|
|||||||
@@ -10,13 +10,14 @@ import (
|
|||||||
|
|
||||||
// Config is the top-level configuration for Metacrypt.
|
// Config is the top-level configuration for Metacrypt.
|
||||||
type Config struct {
|
type Config struct {
|
||||||
Server ServerConfig `toml:"server"`
|
Server ServerConfig `toml:"server"`
|
||||||
Web WebConfig `toml:"web"`
|
Web WebConfig `toml:"web"`
|
||||||
MCIAS MCIASConfig `toml:"mcias"`
|
MCIAS MCIASConfig `toml:"mcias"`
|
||||||
|
SSO SSOConfig `toml:"sso"`
|
||||||
Database mcdslconfig.DatabaseConfig `toml:"database"`
|
Database mcdslconfig.DatabaseConfig `toml:"database"`
|
||||||
Log mcdslconfig.LogConfig `toml:"log"`
|
Log mcdslconfig.LogConfig `toml:"log"`
|
||||||
Seal SealConfig `toml:"seal"`
|
Seal SealConfig `toml:"seal"`
|
||||||
Audit AuditConfig `toml:"audit"`
|
Audit AuditConfig `toml:"audit"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// ServerConfig holds HTTP/gRPC server settings. It embeds the standard
|
// ServerConfig holds HTTP/gRPC server settings. It embeds the standard
|
||||||
@@ -33,6 +34,13 @@ type MCIASConfig struct {
|
|||||||
ServiceToken string `toml:"service_token"`
|
ServiceToken string `toml:"service_token"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SSOConfig holds SSO redirect settings for the web UI.
|
||||||
|
type SSOConfig struct {
|
||||||
|
// RedirectURI is the callback URL that MCIAS redirects to after login.
|
||||||
|
// Must exactly match the redirect_uri registered in MCIAS config.
|
||||||
|
RedirectURI string `toml:"redirect_uri"`
|
||||||
|
}
|
||||||
|
|
||||||
// WebConfig holds settings for the standalone web UI server (metacrypt-web).
|
// WebConfig holds settings for the standalone web UI server (metacrypt-web).
|
||||||
type WebConfig struct {
|
type WebConfig struct {
|
||||||
// ListenAddr is the address the web server listens on (default: 127.0.0.1:8080).
|
// ListenAddr is the address the web server listens on (default: 127.0.0.1:8080).
|
||||||
|
|||||||
@@ -17,6 +17,7 @@ import (
|
|||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
"google.golang.org/grpc/status"
|
"google.golang.org/grpc/status"
|
||||||
|
|
||||||
|
mcdsso "git.wntrmute.dev/mc/mcdsl/sso"
|
||||||
"git.wntrmute.dev/mc/mcdsl/web"
|
"git.wntrmute.dev/mc/mcdsl/web"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -37,7 +38,14 @@ func (ws *WebServer) registerRoutes(r chi.Router) {
|
|||||||
r.Get("/", ws.handleRoot)
|
r.Get("/", ws.handleRoot)
|
||||||
r.HandleFunc("/init", ws.handleInit)
|
r.HandleFunc("/init", ws.handleInit)
|
||||||
r.HandleFunc("/unseal", ws.handleUnseal)
|
r.HandleFunc("/unseal", ws.handleUnseal)
|
||||||
r.HandleFunc("/login", ws.handleLogin)
|
if ws.ssoClient != nil {
|
||||||
|
r.Get("/login", ws.handleSSOLogin)
|
||||||
|
r.Get("/sso/redirect", ws.handleSSORedirect)
|
||||||
|
r.Get("/sso/callback", ws.handleSSOCallback)
|
||||||
|
} else {
|
||||||
|
r.HandleFunc("/login", ws.handleLogin)
|
||||||
|
}
|
||||||
|
r.Get("/logout", ws.handleLogout)
|
||||||
r.Get("/dashboard", ws.requireAuth(ws.handleDashboard))
|
r.Get("/dashboard", ws.requireAuth(ws.handleDashboard))
|
||||||
r.Post("/dashboard/mount-ca", ws.requireAuth(ws.handleDashboardMountCA))
|
r.Post("/dashboard/mount-ca", ws.requireAuth(ws.handleDashboardMountCA))
|
||||||
r.Post("/dashboard/mount-engine", ws.requireAuth(ws.handleDashboardMountEngine))
|
r.Post("/dashboard/mount-engine", ws.requireAuth(ws.handleDashboardMountEngine))
|
||||||
@@ -236,6 +244,43 @@ func (ws *WebServer) handleLogin(w http.ResponseWriter, r *http.Request) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// handleSSOLogin renders a landing page with a "Sign in with MCIAS" button.
|
||||||
|
func (ws *WebServer) handleSSOLogin(w http.ResponseWriter, r *http.Request) {
|
||||||
|
state, _ := ws.vault.Status(r.Context())
|
||||||
|
if state != "unsealed" {
|
||||||
|
http.Redirect(w, r, "/", http.StatusFound)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ws.renderTemplate(w, "login.html", map[string]interface{}{"SSO": true})
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleSSORedirect initiates the SSO redirect to MCIAS.
|
||||||
|
func (ws *WebServer) handleSSORedirect(w http.ResponseWriter, r *http.Request) {
|
||||||
|
if err := mcdsso.RedirectToLogin(w, r, ws.ssoClient, "metacrypt"); err != nil {
|
||||||
|
ws.logger.Error("sso: redirect to login", "error", err)
|
||||||
|
http.Error(w, "internal error", http.StatusInternalServerError)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleSSOCallback exchanges the authorization code for a JWT and sets the session.
|
||||||
|
func (ws *WebServer) handleSSOCallback(w http.ResponseWriter, r *http.Request) {
|
||||||
|
token, returnTo, err := mcdsso.HandleCallback(w, r, ws.ssoClient, "metacrypt")
|
||||||
|
if err != nil {
|
||||||
|
ws.logger.Error("sso: callback", "error", err)
|
||||||
|
http.Error(w, "Login failed. Please try again.", http.StatusUnauthorized)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
web.SetSessionCookie(w, "metacrypt_token", token)
|
||||||
|
http.Redirect(w, r, returnTo, http.StatusSeeOther)
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleLogout clears the session and redirects to login.
|
||||||
|
func (ws *WebServer) handleLogout(w http.ResponseWriter, r *http.Request) {
|
||||||
|
web.ClearSessionCookie(w, "metacrypt_token")
|
||||||
|
http.Redirect(w, r, "/login", http.StatusFound)
|
||||||
|
}
|
||||||
|
|
||||||
func (ws *WebServer) handleDashboard(w http.ResponseWriter, r *http.Request) {
|
func (ws *WebServer) handleDashboard(w http.ResponseWriter, r *http.Request) {
|
||||||
info := tokenInfoFromContext(r.Context())
|
info := tokenInfoFromContext(r.Context())
|
||||||
token := extractCookie(r)
|
token := extractCookie(r)
|
||||||
|
|||||||
@@ -17,6 +17,7 @@ import (
|
|||||||
|
|
||||||
mcdslauth "git.wntrmute.dev/mc/mcdsl/auth"
|
mcdslauth "git.wntrmute.dev/mc/mcdsl/auth"
|
||||||
"git.wntrmute.dev/mc/mcdsl/csrf"
|
"git.wntrmute.dev/mc/mcdsl/csrf"
|
||||||
|
mcdsso "git.wntrmute.dev/mc/mcdsl/sso"
|
||||||
"git.wntrmute.dev/mc/mcdsl/web"
|
"git.wntrmute.dev/mc/mcdsl/web"
|
||||||
"git.wntrmute.dev/mc/metacrypt/internal/config"
|
"git.wntrmute.dev/mc/metacrypt/internal/config"
|
||||||
webui "git.wntrmute.dev/mc/metacrypt/web"
|
webui "git.wntrmute.dev/mc/metacrypt/web"
|
||||||
@@ -115,10 +116,11 @@ type cachedUsername struct {
|
|||||||
type WebServer struct {
|
type WebServer struct {
|
||||||
cfg *config.Config
|
cfg *config.Config
|
||||||
vault vaultBackend
|
vault vaultBackend
|
||||||
logger *slog.Logger
|
logger *slog.Logger
|
||||||
httpSrv *http.Server
|
httpSrv *http.Server
|
||||||
staticFS fs.FS
|
staticFS fs.FS
|
||||||
csrf *csrf.Protect
|
csrf *csrf.Protect
|
||||||
|
ssoClient *mcdsso.Client
|
||||||
tgzCache sync.Map // key: UUID string → *tgzEntry
|
tgzCache sync.Map // key: UUID string → *tgzEntry
|
||||||
userCache sync.Map // key: UUID string → *cachedUsername
|
userCache sync.Map // key: UUID string → *cachedUsername
|
||||||
}
|
}
|
||||||
@@ -169,6 +171,21 @@ func New(cfg *config.Config, logger *slog.Logger) (*WebServer, error) {
|
|||||||
csrf: csrf.New(secret, "metacrypt_csrf", "csrf_token"),
|
csrf: csrf.New(secret, "metacrypt_csrf", "csrf_token"),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Create SSO client if the service has an SSO redirect_uri configured.
|
||||||
|
if cfg.SSO.RedirectURI != "" {
|
||||||
|
ssoClient, ssoErr := mcdsso.New(mcdsso.Config{
|
||||||
|
MciasURL: cfg.MCIAS.ServerURL,
|
||||||
|
ClientID: "metacrypt",
|
||||||
|
RedirectURI: cfg.SSO.RedirectURI,
|
||||||
|
CACert: cfg.MCIAS.CACert,
|
||||||
|
})
|
||||||
|
if ssoErr != nil {
|
||||||
|
return nil, fmt.Errorf("webserver: create SSO client: %w", ssoErr)
|
||||||
|
}
|
||||||
|
ws.ssoClient = ssoClient
|
||||||
|
logger.Info("SSO enabled: redirecting to MCIAS for login", "mcias_url", cfg.MCIAS.ServerURL)
|
||||||
|
}
|
||||||
|
|
||||||
if tok := cfg.MCIAS.ServiceToken; tok != "" {
|
if tok := cfg.MCIAS.ServiceToken; tok != "" {
|
||||||
a, err := mcdslauth.New(mcdslauth.Config{
|
a, err := mcdslauth.New(mcdslauth.Config{
|
||||||
ServerURL: cfg.MCIAS.ServerURL,
|
ServerURL: cfg.MCIAS.ServerURL,
|
||||||
|
|||||||
219
vendor/git.wntrmute.dev/kyle/goutils/LICENSE
vendored
Normal file
219
vendor/git.wntrmute.dev/kyle/goutils/LICENSE
vendored
Normal file
@@ -0,0 +1,219 @@
|
|||||||
|
Copyright 2025 K. Isom <kyle@imap.cc>
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
=======================================================================
|
||||||
|
|
||||||
|
The backoff package (written during my time at Cloudflare) is released
|
||||||
|
under the following license:
|
||||||
|
|
||||||
|
Copyright (c) 2016 CloudFlare Inc.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions
|
||||||
|
are met:
|
||||||
|
|
||||||
|
Redistributions of source code must retain the above copyright notice,
|
||||||
|
this list of conditions and the following disclaimer.
|
||||||
|
|
||||||
|
Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
this list of conditions and the following disclaimer in the documentation
|
||||||
|
and/or other materials provided with the distribution.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||||
|
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||||
|
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||||
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||||
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||||
|
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
276
vendor/git.wntrmute.dev/kyle/goutils/certlib/certgen/config.go
vendored
Normal file
276
vendor/git.wntrmute.dev/kyle/goutils/certlib/certgen/config.go
vendored
Normal file
@@ -0,0 +1,276 @@
|
|||||||
|
package certgen
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto"
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/x509"
|
||||||
|
"crypto/x509/pkix"
|
||||||
|
"fmt"
|
||||||
|
"math/big"
|
||||||
|
"net"
|
||||||
|
"slices"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.wntrmute.dev/kyle/goutils/lib"
|
||||||
|
)
|
||||||
|
|
||||||
|
type KeySpec struct {
|
||||||
|
Algorithm string `yaml:"algorithm"`
|
||||||
|
Size int `yaml:"size"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ks KeySpec) String() string {
|
||||||
|
if strings.ToLower(ks.Algorithm) == nameEd25519 {
|
||||||
|
return nameEd25519
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf("%s-%d", ks.Algorithm, ks.Size)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ks KeySpec) Generate() (crypto.PublicKey, crypto.PrivateKey, error) {
|
||||||
|
switch strings.ToLower(ks.Algorithm) {
|
||||||
|
case "rsa":
|
||||||
|
return GenerateKey(x509.RSA, ks.Size)
|
||||||
|
case "ecdsa":
|
||||||
|
return GenerateKey(x509.ECDSA, ks.Size)
|
||||||
|
case nameEd25519:
|
||||||
|
return GenerateKey(x509.Ed25519, 0)
|
||||||
|
default:
|
||||||
|
return nil, nil, fmt.Errorf("unknown key algorithm: %s", ks.Algorithm)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ks KeySpec) SigningAlgorithm() (x509.SignatureAlgorithm, error) {
|
||||||
|
switch strings.ToLower(ks.Algorithm) {
|
||||||
|
case "rsa":
|
||||||
|
return x509.SHA512WithRSAPSS, nil
|
||||||
|
case "ecdsa":
|
||||||
|
return x509.ECDSAWithSHA512, nil
|
||||||
|
case nameEd25519:
|
||||||
|
return x509.PureEd25519, nil
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("unknown key algorithm: %s", ks.Algorithm)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type Subject struct {
|
||||||
|
CommonName string `yaml:"common_name"`
|
||||||
|
Country string `yaml:"country"`
|
||||||
|
Locality string `yaml:"locality"`
|
||||||
|
Province string `yaml:"province"`
|
||||||
|
Organization string `yaml:"organization"`
|
||||||
|
OrganizationalUnit string `yaml:"organizational_unit"`
|
||||||
|
Email []string `yaml:"email"`
|
||||||
|
DNSNames []string `yaml:"dns"`
|
||||||
|
IPAddresses []string `yaml:"ips"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type CertificateRequest struct {
|
||||||
|
KeySpec KeySpec `yaml:"key"`
|
||||||
|
Subject Subject `yaml:"subject"`
|
||||||
|
Profile Profile `yaml:"profile"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cs CertificateRequest) Request(priv crypto.PrivateKey) (*x509.CertificateRequest, error) {
|
||||||
|
subject := pkix.Name{}
|
||||||
|
subject.CommonName = cs.Subject.CommonName
|
||||||
|
subject.Country = []string{cs.Subject.Country}
|
||||||
|
subject.Locality = []string{cs.Subject.Locality}
|
||||||
|
subject.Province = []string{cs.Subject.Province}
|
||||||
|
subject.Organization = []string{cs.Subject.Organization}
|
||||||
|
subject.OrganizationalUnit = []string{cs.Subject.OrganizationalUnit}
|
||||||
|
|
||||||
|
ipAddresses := make([]net.IP, 0, len(cs.Subject.IPAddresses))
|
||||||
|
for i, ip := range cs.Subject.IPAddresses {
|
||||||
|
ipAddresses = append(ipAddresses, net.ParseIP(ip))
|
||||||
|
if ipAddresses[i] == nil {
|
||||||
|
return nil, fmt.Errorf("invalid IP address: %s", ip)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dnsNames := cs.Subject.DNSNames
|
||||||
|
if isFQDN(cs.Subject.CommonName) && !slices.Contains(dnsNames, cs.Subject.CommonName) {
|
||||||
|
dnsNames = append(dnsNames, cs.Subject.CommonName)
|
||||||
|
}
|
||||||
|
|
||||||
|
req := &x509.CertificateRequest{
|
||||||
|
PublicKeyAlgorithm: 0,
|
||||||
|
PublicKey: getPublic(priv),
|
||||||
|
Subject: subject,
|
||||||
|
EmailAddresses: cs.Subject.Email,
|
||||||
|
DNSNames: dnsNames,
|
||||||
|
IPAddresses: ipAddresses,
|
||||||
|
}
|
||||||
|
|
||||||
|
reqBytes, err := x509.CreateCertificateRequest(rand.Reader, req, priv)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create certificate request: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err = x509.ParseCertificateRequest(reqBytes)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to parse certificate request: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return req, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cs CertificateRequest) Generate() (crypto.PrivateKey, *x509.CertificateRequest, error) {
|
||||||
|
_, priv, err := cs.KeySpec.Generate()
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := cs.Request(priv)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return priv, req, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type Profile struct {
|
||||||
|
IsCA bool `yaml:"is_ca"`
|
||||||
|
PathLen int `yaml:"path_len"`
|
||||||
|
KeyUse []string `yaml:"key_uses"`
|
||||||
|
ExtKeyUsages []string `yaml:"ext_key_usages"`
|
||||||
|
Expiry string `yaml:"expiry"`
|
||||||
|
OCSPServer []string `yaml:"ocsp_server,omitempty"`
|
||||||
|
IssuingCertificateURL []string `yaml:"issuing_certificate_url,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p Profile) templateFromRequest(req *x509.CertificateRequest) (*x509.Certificate, error) {
|
||||||
|
serial, err := SerialNumber()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to generate serial number: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
expiry, err := lib.ParseDuration(p.Expiry)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("parsing expiry: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
certTemplate := &x509.Certificate{
|
||||||
|
SignatureAlgorithm: req.SignatureAlgorithm,
|
||||||
|
PublicKeyAlgorithm: req.PublicKeyAlgorithm,
|
||||||
|
PublicKey: req.PublicKey,
|
||||||
|
SerialNumber: serial,
|
||||||
|
Subject: req.Subject,
|
||||||
|
NotBefore: time.Now().Add(-1 * time.Hour),
|
||||||
|
NotAfter: time.Now().Add(expiry),
|
||||||
|
BasicConstraintsValid: true,
|
||||||
|
IsCA: p.IsCA,
|
||||||
|
MaxPathLen: p.PathLen,
|
||||||
|
DNSNames: req.DNSNames,
|
||||||
|
IPAddresses: req.IPAddresses,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, sku := range p.KeyUse {
|
||||||
|
ku, ok := keyUsageStrings[sku]
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("invalid key usage: %s", p.KeyUse)
|
||||||
|
}
|
||||||
|
|
||||||
|
certTemplate.KeyUsage |= ku
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, extKeyUsage := range p.ExtKeyUsages {
|
||||||
|
eku, ok := extKeyUsageStrings[extKeyUsage]
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("invalid extended key usage: %s", extKeyUsage)
|
||||||
|
}
|
||||||
|
certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, eku)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(p.OCSPServer) > 0 {
|
||||||
|
certTemplate.OCSPServer = p.OCSPServer
|
||||||
|
}
|
||||||
|
if len(p.IssuingCertificateURL) > 0 {
|
||||||
|
certTemplate.IssuingCertificateURL = p.IssuingCertificateURL
|
||||||
|
}
|
||||||
|
|
||||||
|
return certTemplate, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p Profile) SignRequest(
|
||||||
|
parent *x509.Certificate,
|
||||||
|
req *x509.CertificateRequest,
|
||||||
|
priv crypto.PrivateKey,
|
||||||
|
) (*x509.Certificate, error) {
|
||||||
|
tpl, err := p.templateFromRequest(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create certificate template: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
certBytes, err := x509.CreateCertificate(rand.Reader, tpl, parent, req.PublicKey, priv)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create certificate: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cert, err := x509.ParseCertificate(certBytes)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to parse certificate: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return cert, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p Profile) SelfSign(req *x509.CertificateRequest, priv crypto.PrivateKey) (*x509.Certificate, error) {
|
||||||
|
certTemplate, err := p.templateFromRequest(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create certificate template: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return p.SignRequest(certTemplate, req, priv)
|
||||||
|
}
|
||||||
|
|
||||||
|
// isFQDN returns true if s looks like a fully-qualified domain name:
// non-empty, containing at least one dot and no spaces or tabs, where
// every label consists of letters, digits, or hyphens and does not
// begin or end with a hyphen. A single trailing dot (the DNS root) is
// permitted.
func isFQDN(s string) bool {
	if s == "" {
		return false
	}
	// Must contain at least one dot and no whitespace.
	if !strings.Contains(s, ".") || strings.ContainsAny(s, " \t") {
		return false
	}
	labels := strings.Split(strings.TrimSuffix(s, "."), ".")
	for _, label := range labels {
		if label == "" {
			return false
		}
		if label[0] == '-' || label[len(label)-1] == '-' {
			return false
		}
		for _, c := range label {
			isAlpha := (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
			isNum := c >= '0' && c <= '9'
			if !isAlpha && !isNum && c != '-' {
				return false
			}
		}
	}
	return true
}
|
||||||
|
|
||||||
|
// SerialNumber returns a random 160-bit (20-byte) non-negative integer
// suitable for use as an X.509 certificate serial number.
func SerialNumber() (*big.Int, error) {
	buf := make([]byte, 20)
	if _, err := rand.Read(buf); err != nil {
		return nil, fmt.Errorf("failed to generate serial number: %w", err)
	}
	return new(big.Int).SetBytes(buf), nil
}
|
||||||
|
|
||||||
|
// GenerateSelfSigned generates a self-signed certificate using the given certificate request.
|
||||||
|
func GenerateSelfSigned(creq *CertificateRequest) (*x509.Certificate, crypto.PrivateKey, error) {
|
||||||
|
priv, req, err := creq.Generate()
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("failed to generate certificate request: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cert, err := creq.Profile.SelfSign(req, priv)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("failed to self-sign certificate: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return cert, priv, nil
|
||||||
|
}
|
||||||
90
vendor/git.wntrmute.dev/kyle/goutils/certlib/certgen/keygen.go
vendored
Normal file
90
vendor/git.wntrmute.dev/kyle/goutils/certlib/certgen/keygen.go
vendored
Normal file
@@ -0,0 +1,90 @@
|
|||||||
|
package certgen
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto"
|
||||||
|
"crypto/ecdsa"
|
||||||
|
"crypto/ed25519"
|
||||||
|
"crypto/elliptic"
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/rsa"
|
||||||
|
"crypto/x509"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// var (
|
||||||
|
// oidEd25519 = asn1.ObjectIdentifier{1, 3, 101, 110}
|
||||||
|
//)
|
||||||
|
|
||||||
|
const (
|
||||||
|
nameEd25519 = "ed25519"
|
||||||
|
)
|
||||||
|
|
||||||
|
func GenerateKey(algorithm x509.PublicKeyAlgorithm, bitSize int) (crypto.PublicKey, crypto.PrivateKey, error) {
|
||||||
|
var key crypto.PrivateKey
|
||||||
|
var pub crypto.PublicKey
|
||||||
|
var err error
|
||||||
|
|
||||||
|
switch algorithm {
|
||||||
|
case x509.Ed25519:
|
||||||
|
pub, key, err = ed25519.GenerateKey(rand.Reader)
|
||||||
|
case x509.RSA:
|
||||||
|
key, err = rsa.GenerateKey(rand.Reader, bitSize)
|
||||||
|
if err == nil {
|
||||||
|
rsaPriv, ok := key.(*rsa.PrivateKey)
|
||||||
|
if !ok {
|
||||||
|
panic("failed to cast RSA private key to *rsa.PrivateKey")
|
||||||
|
}
|
||||||
|
|
||||||
|
pub = rsaPriv.Public()
|
||||||
|
}
|
||||||
|
case x509.ECDSA:
|
||||||
|
var curve elliptic.Curve
|
||||||
|
|
||||||
|
switch bitSize {
|
||||||
|
case 256:
|
||||||
|
curve = elliptic.P256()
|
||||||
|
case 384:
|
||||||
|
curve = elliptic.P384()
|
||||||
|
case 521:
|
||||||
|
curve = elliptic.P521()
|
||||||
|
default:
|
||||||
|
return nil, nil, fmt.Errorf("unsupported curve size %d", bitSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
key, err = ecdsa.GenerateKey(curve, rand.Reader)
|
||||||
|
if err == nil {
|
||||||
|
ecPriv, ok := key.(*ecdsa.PrivateKey)
|
||||||
|
if !ok {
|
||||||
|
panic("failed to cast ECDSA private key to *ecdsa.PrivateKey")
|
||||||
|
}
|
||||||
|
|
||||||
|
pub = ecPriv.Public()
|
||||||
|
}
|
||||||
|
case x509.DSA:
|
||||||
|
fallthrough
|
||||||
|
case x509.UnknownPublicKeyAlgorithm:
|
||||||
|
fallthrough
|
||||||
|
default:
|
||||||
|
err = errors.New("unsupported algorithm")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return pub, key, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getPublic(priv crypto.PrivateKey) crypto.PublicKey {
|
||||||
|
switch priv := priv.(type) {
|
||||||
|
case *rsa.PrivateKey:
|
||||||
|
return &priv.PublicKey
|
||||||
|
case *ecdsa.PrivateKey:
|
||||||
|
return &priv.PublicKey
|
||||||
|
case *ed25519.PrivateKey:
|
||||||
|
return priv.Public()
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
32
vendor/git.wntrmute.dev/kyle/goutils/certlib/certgen/ku.go
vendored
Normal file
32
vendor/git.wntrmute.dev/kyle/goutils/certlib/certgen/ku.go
vendored
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
package certgen
|
||||||
|
|
||||||
|
import "crypto/x509"
|
||||||
|
|
||||||
|
// keyUsageStrings maps the human-readable key-usage names accepted in
// profile configuration to their x509.KeyUsage bit values. "signing"
// and "digital signature" are aliases for the same bit.
var keyUsageStrings = map[string]x509.KeyUsage{
	"signing":            x509.KeyUsageDigitalSignature,
	"digital signature":  x509.KeyUsageDigitalSignature,
	"content commitment": x509.KeyUsageContentCommitment,
	"key encipherment":   x509.KeyUsageKeyEncipherment,
	"key agreement":      x509.KeyUsageKeyAgreement,
	"data encipherment":  x509.KeyUsageDataEncipherment,
	"cert sign":          x509.KeyUsageCertSign,
	"crl sign":           x509.KeyUsageCRLSign,
	"encipher only":      x509.KeyUsageEncipherOnly,
	"decipher only":      x509.KeyUsageDecipherOnly,
}

// extKeyUsageStrings maps human-readable extended-key-usage names to
// their x509.ExtKeyUsage values. "s/mime" is an alias for
// "email protection".
var extKeyUsageStrings = map[string]x509.ExtKeyUsage{
	"any":              x509.ExtKeyUsageAny,
	"server auth":      x509.ExtKeyUsageServerAuth,
	"client auth":      x509.ExtKeyUsageClientAuth,
	"code signing":     x509.ExtKeyUsageCodeSigning,
	"email protection": x509.ExtKeyUsageEmailProtection,
	"s/mime":           x509.ExtKeyUsageEmailProtection,
	"ipsec end system": x509.ExtKeyUsageIPSECEndSystem,
	"ipsec tunnel":     x509.ExtKeyUsageIPSECTunnel,
	"ipsec user":       x509.ExtKeyUsageIPSECUser,
	"timestamping":     x509.ExtKeyUsageTimeStamping,
	"ocsp signing":     x509.ExtKeyUsageOCSPSigning,
	"microsoft sgc":    x509.ExtKeyUsageMicrosoftServerGatedCrypto,
	"netscape sgc":     x509.ExtKeyUsageNetscapeServerGatedCrypto,
}
|
||||||
21
vendor/git.wntrmute.dev/kyle/goutils/lib/defs.go
vendored
Normal file
21
vendor/git.wntrmute.dev/kyle/goutils/lib/defs.go
vendored
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
package lib
|
||||||
|
|
||||||
|
// Various constants used throughout the tools.
|
||||||
|
|
||||||
|
const (
|
||||||
|
// ExitSuccess is the successful exit status.
|
||||||
|
//
|
||||||
|
// It should be called on successful exit.
|
||||||
|
ExitSuccess = 0
|
||||||
|
|
||||||
|
// ExitFailure is the failing exit status.
|
||||||
|
ExitFailure = 1
|
||||||
|
)
|
||||||
|
|
||||||
|
// Timestamp layouts shared by the tools (Go reference-time format).
const (
	// OneTrueDateFormat is the standard full timestamp layout, with a
	// numeric zone offset (no colon, so not strictly RFC 3339).
	OneTrueDateFormat = "2006-01-02T15:04:05-0700"
	// DateShortFormat is a date-only layout.
	DateShortFormat = "2006-01-02"
	// TimeShortFormat is a time-of-day layout with seconds.
	TimeShortFormat = "15:04:05"
	// TimeShorterFormat is a time-of-day layout without seconds.
	TimeShorterFormat = "15:04"
	// TimeStandardDateTime is a date-and-time layout without seconds
	// or zone.
	TimeStandardDateTime = "2006-01-02 15:04"
)
|
||||||
37
vendor/git.wntrmute.dev/kyle/goutils/lib/ftime_bsd.go
vendored
Normal file
37
vendor/git.wntrmute.dev/kyle/goutils/lib/ftime_bsd.go
vendored
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
//go:build bsd
|
||||||
|
|
||||||
|
package lib
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FileTime contains the changed, modified, and accessed timestamps
|
||||||
|
// for a file.
|
||||||
|
type FileTime struct {
|
||||||
|
Changed time.Time
|
||||||
|
Modified time.Time
|
||||||
|
Accessed time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func timeSpecToTime(ts unix.Timespec) time.Time {
|
||||||
|
return time.Unix(ts.Sec, ts.Nsec)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadFileTime returns a FileTime associated with the file.
|
||||||
|
func LoadFileTime(path string) (FileTime, error) {
|
||||||
|
var ft = FileTime{}
|
||||||
|
var st = unix.Stat_t{}
|
||||||
|
|
||||||
|
err := unix.Stat(path, &st)
|
||||||
|
if err != nil {
|
||||||
|
return ft, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ft.Changed = timeSpecToTime(st.Ctimespec)
|
||||||
|
ft.Modified = timeSpecToTime(st.Mtimespec)
|
||||||
|
ft.Accessed = timeSpecToTime(st.Atimespec)
|
||||||
|
return ft, nil
|
||||||
|
}
|
||||||
38
vendor/git.wntrmute.dev/kyle/goutils/lib/ftime_unix.go
vendored
Normal file
38
vendor/git.wntrmute.dev/kyle/goutils/lib/ftime_unix.go
vendored
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
//go:build unix || linux || openbsd || (darwin && amd64)
|
||||||
|
|
||||||
|
package lib
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FileTime contains the changed, modified, and accessed timestamps
|
||||||
|
// for a file.
|
||||||
|
type FileTime struct {
|
||||||
|
Changed time.Time
|
||||||
|
Modified time.Time
|
||||||
|
Accessed time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func timeSpecToTime(ts unix.Timespec) time.Time {
|
||||||
|
// The casts to int64 are needed because on 386, these are int32s.
|
||||||
|
return time.Unix(ts.Sec, ts.Nsec)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadFileTime returns a FileTime associated with the file.
//
// It stats the file via x/sys/unix and converts the raw ctime, mtime,
// and atime timespecs into time.Time values. Errors from stat(2) are
// returned with a zero FileTime.
func LoadFileTime(path string) (FileTime, error) {
	var ft = FileTime{}
	var st = unix.Stat_t{}

	err := unix.Stat(path, &st)
	if err != nil {
		return ft, err
	}

	// Ctim/Mtim/Atim are the Linux-style field names for the change,
	// modification, and access timespecs.
	ft.Changed = timeSpecToTime(st.Ctim)
	ft.Modified = timeSpecToTime(st.Mtim)
	ft.Accessed = timeSpecToTime(st.Atim)
	return ft, nil
}
|
||||||
349
vendor/git.wntrmute.dev/kyle/goutils/lib/lib.go
vendored
Normal file
349
vendor/git.wntrmute.dev/kyle/goutils/lib/lib.go
vendored
Normal file
@@ -0,0 +1,349 @@
|
|||||||
|
package lib
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/hex"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var progname = filepath.Base(os.Args[0])
|
||||||
|
|
||||||
|
const (
|
||||||
|
daysInYear = 365
|
||||||
|
digitWidth = 10
|
||||||
|
hoursInQuarterDay = 6
|
||||||
|
)
|
||||||
|
|
||||||
|
// ProgName returns what lib thinks the program name is, namely the
|
||||||
|
// basename of argv0.
|
||||||
|
//
|
||||||
|
// It is similar to the Linux __progname function.
|
||||||
|
func ProgName() string {
|
||||||
|
return progname
|
||||||
|
}
|
||||||
|
|
||||||
|
// Warnx displays a formatted error message to standard error, à la
// warnx(3). It returns the byte count and error from the underlying
// fmt.Fprintf.
func Warnx(format string, a ...any) (int, error) {
	// Prefix every message with the program name, BSD-style.
	format = fmt.Sprintf("[%s] %s", progname, format)
	format += "\n"
	return fmt.Fprintf(os.Stderr, format, a...)
}
|
||||||
|
|
||||||
|
// Warn displays a formatted error message to standard error,
// appending the error string, à la warn(3). (The output goes to
// os.Stderr; a previous version of this comment said standard output.)
func Warn(err error, format string, a ...any) (int, error) {
	format = fmt.Sprintf("[%s] %s", progname, format)
	// ": %v\n" renders the appended error after the caller's message.
	format += ": %v\n"
	a = append(a, err)
	return fmt.Fprintf(os.Stderr, format, a...)
}
|
||||||
|
|
||||||
|
// Errx displays a formatted error message to standard error and exits
// with the status code from `exit`, à la errx(3).
//
// It does not return: os.Exit terminates the process immediately, so
// deferred functions will not run.
func Errx(exit int, format string, a ...any) {
	format = fmt.Sprintf("[%s] %s", progname, format)
	format += "\n"
	fmt.Fprintf(os.Stderr, format, a...)
	os.Exit(exit)
}
|
||||||
|
|
||||||
|
// Err displays a formatting error message to standard error,
// appending the error string, and exits with the status code from
// `exit`, à la err(3).
//
// It does not return: os.Exit terminates the process immediately, so
// deferred functions will not run.
func Err(exit int, err error, format string, a ...any) {
	format = fmt.Sprintf("[%s] %s", progname, format)
	// ": %v\n" renders the appended error after the caller's message.
	format += ": %v\n"
	a = append(a, err)
	fmt.Fprintf(os.Stderr, format, a...)
	os.Exit(exit)
}
|
||||||
|
|
||||||
|
// Itoa provides cheap integer to fixed-width decimal ASCII. Give a
// negative width to avoid zero-padding. Adapted from the 'itoa'
// function in the log/log.go file in the standard library.
func Itoa(i int, wid int) string {
	// Assemble the decimal digits from least to most significant into
	// the tail of a fixed scratch buffer.
	var scratch [20]byte
	pos := len(scratch) - 1
	for i >= 10 || wid > 1 {
		wid--
		q := i / 10
		scratch[pos] = byte('0' + i - q*10)
		pos--
		i = q
	}
	scratch[pos] = byte('0' + i)
	return string(scratch[pos:])
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
dayDuration = 24 * time.Hour
|
||||||
|
yearDuration = (daysInYear * dayDuration) + (hoursInQuarterDay * time.Hour)
|
||||||
|
)
|
||||||
|
|
||||||
|
// Duration returns a prettier string for time.Durations.
|
||||||
|
func Duration(d time.Duration) string {
|
||||||
|
var s string
|
||||||
|
if d >= yearDuration {
|
||||||
|
years := int64(d / yearDuration)
|
||||||
|
s += fmt.Sprintf("%dy", years)
|
||||||
|
d -= time.Duration(years) * yearDuration
|
||||||
|
}
|
||||||
|
|
||||||
|
if d >= dayDuration {
|
||||||
|
days := d / dayDuration
|
||||||
|
s += fmt.Sprintf("%dd", days)
|
||||||
|
}
|
||||||
|
|
||||||
|
if s != "" {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
d %= 1 * time.Second
|
||||||
|
hours := int64(d / time.Hour)
|
||||||
|
d -= time.Duration(hours) * time.Hour
|
||||||
|
s += fmt.Sprintf("%dh%s", hours, d)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsDigit checks if a byte is a decimal digit ('0' through '9').
func IsDigit(b byte) bool {
	switch {
	case b < '0', b > '9':
		return false
	default:
		return true
	}
}
|
||||||
|
|
||||||
|
const signedaMask64 = 1<<63 - 1
|
||||||
|
|
||||||
|
// ParseDuration parses a duration string into a time.Duration.
|
||||||
|
// It supports standard units (ns, us/µs, ms, s, m, h) plus extended units:
|
||||||
|
// d (days, 24h), w (weeks, 7d), y (years, 365d).
|
||||||
|
// Units can be combined without spaces, e.g., "1y2w3d4h5m6s".
|
||||||
|
// Case-insensitive. Years and days are approximations (no leap seconds/months).
|
||||||
|
// Returns an error for invalid input.
|
||||||
|
func ParseDuration(s string) (time.Duration, error) {
|
||||||
|
s = strings.ToLower(s) // Normalize to lowercase for case-insensitivity.
|
||||||
|
if s == "" {
|
||||||
|
return 0, errors.New("empty duration string")
|
||||||
|
}
|
||||||
|
|
||||||
|
var total time.Duration
|
||||||
|
i := 0
|
||||||
|
for i < len(s) {
|
||||||
|
// Parse the number part.
|
||||||
|
start := i
|
||||||
|
for i < len(s) && IsDigit(s[i]) {
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
if start == i {
|
||||||
|
return 0, fmt.Errorf("expected number at position %d", start)
|
||||||
|
}
|
||||||
|
numStr := s[start:i]
|
||||||
|
num, err := strconv.ParseUint(numStr, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("invalid number %q: %w", numStr, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse the unit part.
|
||||||
|
if i >= len(s) {
|
||||||
|
return 0, fmt.Errorf("expected unit after number %q", numStr)
|
||||||
|
}
|
||||||
|
unitStart := i
|
||||||
|
i++ // Consume the first char of the unit.
|
||||||
|
unit := s[unitStart:i]
|
||||||
|
|
||||||
|
// Handle potential two-char units like "ms".
|
||||||
|
if unit == "m" && i < len(s) && s[i] == 's' {
|
||||||
|
i++ // Consume the 's'.
|
||||||
|
unit = "ms"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert to duration based on unit.
|
||||||
|
var d time.Duration
|
||||||
|
switch unit {
|
||||||
|
case "ns":
|
||||||
|
d = time.Nanosecond * time.Duration(num&signedaMask64) // #nosec G115 - masked off
|
||||||
|
case "us", "µs":
|
||||||
|
d = time.Microsecond * time.Duration(num&signedaMask64) // #nosec G115 - masked off
|
||||||
|
case "ms":
|
||||||
|
d = time.Millisecond * time.Duration(num&signedaMask64) // #nosec G115 - masked off
|
||||||
|
case "s":
|
||||||
|
d = time.Second * time.Duration(num&signedaMask64) // #nosec G115 - masked off
|
||||||
|
case "m":
|
||||||
|
d = time.Minute * time.Duration(num&signedaMask64) // #nosec G115 - masked off
|
||||||
|
case "h":
|
||||||
|
d = time.Hour * time.Duration(num&signedaMask64) // #nosec G115 - masked off
|
||||||
|
case "d":
|
||||||
|
d = 24 * time.Hour * time.Duration(num&signedaMask64) // #nosec G115 - masked off
|
||||||
|
case "w":
|
||||||
|
d = 7 * 24 * time.Hour * time.Duration(num&signedaMask64) // #nosec G115 - masked off
|
||||||
|
case "y":
|
||||||
|
// Approximate, non-leap year.
|
||||||
|
d = 365 * 24 * time.Hour * time.Duration(num&signedaMask64) // #nosec G115 - masked off;
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("unknown unit %q at position %d", s[unitStart:i], unitStart)
|
||||||
|
}
|
||||||
|
|
||||||
|
total += d
|
||||||
|
}
|
||||||
|
|
||||||
|
return total, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// HexEncodeMode selects the output format produced by HexEncode.
type HexEncodeMode uint8

const (
	// HexEncodeLower prints the bytes as lowercase hexadecimal.
	HexEncodeLower HexEncodeMode = iota + 1
	// HexEncodeUpper prints the bytes as uppercase hexadecimal.
	HexEncodeUpper
	// HexEncodeLowerColon prints the bytes as lowercase hexadecimal
	// with colons between each pair of bytes.
	HexEncodeLowerColon
	// HexEncodeUpperColon prints the bytes as uppercase hexadecimal
	// with colons between each pair of bytes.
	HexEncodeUpperColon
	// HexEncodeBytes prints the string as a sequence of []byte.
	HexEncodeBytes
	// HexEncodeBase64 prints the string as a base64-encoded string.
	HexEncodeBase64
)

// String returns the lowercase name of the mode; it panics on an
// unrecognized value.
func (m HexEncodeMode) String() string {
	var name string
	switch m {
	case HexEncodeLower:
		name = "lower"
	case HexEncodeUpper:
		name = "upper"
	case HexEncodeLowerColon:
		name = "lcolon"
	case HexEncodeUpperColon:
		name = "ucolon"
	case HexEncodeBytes:
		name = "bytes"
	case HexEncodeBase64:
		name = "base64"
	default:
		panic("invalid hex encode mode")
	}
	return name
}
|
||||||
|
|
||||||
|
func ParseHexEncodeMode(s string) HexEncodeMode {
|
||||||
|
switch strings.ToLower(s) {
|
||||||
|
case "lower":
|
||||||
|
return HexEncodeLower
|
||||||
|
case "upper":
|
||||||
|
return HexEncodeUpper
|
||||||
|
case "lcolon":
|
||||||
|
return HexEncodeLowerColon
|
||||||
|
case "ucolon":
|
||||||
|
return HexEncodeUpperColon
|
||||||
|
case "bytes":
|
||||||
|
return HexEncodeBytes
|
||||||
|
case "base64":
|
||||||
|
return HexEncodeBase64
|
||||||
|
}
|
||||||
|
|
||||||
|
panic("invalid hex encode mode")
|
||||||
|
}
|
||||||
|
|
||||||
|
// hexColons inserts a colon between each pair of hex digits, e.g.
// "deadbeef" -> "de:ad:be:ef". The input must have even length; an odd
// length is a programming error and panics (after dumping the string
// to stderr for diagnosis).
func hexColons(s string) string {
	if len(s)%2 != 0 {
		fmt.Fprintf(os.Stderr, "hex string: %s\n", s)
		fmt.Fprintf(os.Stderr, "hex length: %d\n", len(s))
		panic("invalid hex string length")
	}

	n := len(s)
	if n <= 2 {
		return s
	}

	// n is guaranteed even here, so the pairs line up exactly and the
	// output needs n/2 - 1 separators. (The odd-length pairCount bump
	// and the `i+1 < n` guard the original carried below the panic were
	// unreachable dead code.)
	var b strings.Builder
	b.Grow(n + n/2 - 1)
	for i := 0; i < n; i += 2 {
		if i > 0 {
			b.WriteByte(':')
		}
		b.WriteByte(s[i])
		b.WriteByte(s[i+1])
	}
	return b.String()
}
|
||||||
|
|
||||||
|
// hexEncode returns the lowercase hex encoding of b, left-padded with
// a zero if the result would somehow have odd length.
func hexEncode(b []byte) string {
	encoded := hex.EncodeToString(b)
	if len(encoded)%2 == 0 {
		return encoded
	}
	// Defensive: hex.EncodeToString always yields an even length, but
	// the original kept this padding guard, so preserve it.
	return "0" + encoded
}
|
||||||
|
|
||||||
|
// bytesAsByteSliceString renders buf as Go source text for a []byte
// literal, e.g. []byte{0x01, 0x02, }.
func bytesAsByteSliceString(buf []byte) string {
	var sb strings.Builder
	sb.WriteString("[]byte{")
	for _, c := range buf {
		fmt.Fprintf(&sb, "0x%02x, ", c)
	}
	sb.WriteString("}")
	return sb.String()
}
|
||||||
|
|
||||||
|
// HexEncode encodes the given bytes as a hexadecimal string. It
|
||||||
|
// also supports a few other binary-encoding formats as well.
|
||||||
|
func HexEncode(b []byte, mode HexEncodeMode) string {
|
||||||
|
switch mode {
|
||||||
|
case HexEncodeLower:
|
||||||
|
return hexEncode(b)
|
||||||
|
case HexEncodeUpper:
|
||||||
|
return strings.ToUpper(hexEncode(b))
|
||||||
|
case HexEncodeLowerColon:
|
||||||
|
return hexColons(hexEncode(b))
|
||||||
|
case HexEncodeUpperColon:
|
||||||
|
return strings.ToUpper(hexColons(hexEncode(b)))
|
||||||
|
case HexEncodeBytes:
|
||||||
|
return bytesAsByteSliceString(b)
|
||||||
|
case HexEncodeBase64:
|
||||||
|
return base64.StdEncoding.EncodeToString(b)
|
||||||
|
default:
|
||||||
|
panic("invalid hex encode mode")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DummyWriteCloser wraps an io.Writer in a struct with a no-op Close.
|
||||||
|
type DummyWriteCloser struct {
|
||||||
|
w io.Writer
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithCloser(w io.Writer) io.WriteCloser {
|
||||||
|
return &DummyWriteCloser{w: w}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dwc *DummyWriteCloser) Write(p []byte) (int, error) {
|
||||||
|
return dwc.w.Write(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dwc *DummyWriteCloser) Close() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
303
vendor/git.wntrmute.dev/mc/mcdsl/auth/auth.go
vendored
Normal file
303
vendor/git.wntrmute.dev/mc/mcdsl/auth/auth.go
vendored
Normal file
@@ -0,0 +1,303 @@
|
|||||||
|
// Package auth provides MCIAS token validation with caching for
|
||||||
|
// Metacircular services.
|
||||||
|
//
|
||||||
|
// Every Metacircular service delegates authentication to MCIAS. This
|
||||||
|
// package handles the login flow, token validation (with a 30-second
|
||||||
|
// SHA-256-keyed cache), and logout. It communicates directly with the
|
||||||
|
// MCIAS REST API.
|
||||||
|
//
|
||||||
|
// Security: bearer tokens are never logged or included in error messages.
|
||||||
|
package auth
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"crypto/sha256"
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"log/slog"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const cacheTTL = 30 * time.Second
|
||||||
|
|
||||||
|
// Errors returned by the Authenticator.
|
||||||
|
var (
|
||||||
|
// ErrInvalidToken indicates the token is expired, revoked, or otherwise
|
||||||
|
// invalid.
|
||||||
|
ErrInvalidToken = errors.New("auth: invalid token")
|
||||||
|
|
||||||
|
// ErrInvalidCredentials indicates that the username/password combination
|
||||||
|
// was rejected by MCIAS.
|
||||||
|
ErrInvalidCredentials = errors.New("auth: invalid credentials")
|
||||||
|
|
||||||
|
// ErrForbidden indicates that MCIAS login policy denied access to this
|
||||||
|
// service (HTTP 403).
|
||||||
|
ErrForbidden = errors.New("auth: forbidden by policy")
|
||||||
|
|
||||||
|
// ErrUnavailable indicates that MCIAS could not be reached.
|
||||||
|
ErrUnavailable = errors.New("auth: MCIAS unavailable")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Config holds MCIAS connection settings. This matches the standard [mcias]
|
||||||
|
// TOML section used by all Metacircular services.
|
||||||
|
type Config struct {
|
||||||
|
// ServerURL is the base URL of the MCIAS server
|
||||||
|
// (e.g., "https://mcias.metacircular.net:8443").
|
||||||
|
ServerURL string `toml:"server_url"`
|
||||||
|
|
||||||
|
// CACert is an optional path to a PEM-encoded CA certificate for
|
||||||
|
// verifying the MCIAS server's TLS certificate.
|
||||||
|
CACert string `toml:"ca_cert"`
|
||||||
|
|
||||||
|
// ServiceName is this service's identity as registered in MCIAS. It is
|
||||||
|
// sent with every login request so MCIAS can evaluate service-context
|
||||||
|
// login policy rules.
|
||||||
|
ServiceName string `toml:"service_name"`
|
||||||
|
|
||||||
|
// Tags are sent with every login request. MCIAS evaluates auth:login
|
||||||
|
// policy against these tags (e.g., ["env:restricted"]).
|
||||||
|
Tags []string `toml:"tags"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// TokenInfo holds the validated identity of an authenticated caller.
|
||||||
|
type TokenInfo struct {
|
||||||
|
// Username is the MCIAS username (the "sub" claim).
|
||||||
|
Username string
|
||||||
|
|
||||||
|
// AccountType is the MCIAS account type: "human" or "system".
|
||||||
|
// Used by policy engines that need to distinguish interactive users
|
||||||
|
// from service accounts.
|
||||||
|
AccountType string
|
||||||
|
|
||||||
|
// Roles is the set of MCIAS roles assigned to the account.
|
||||||
|
Roles []string
|
||||||
|
|
||||||
|
// IsAdmin is true if the account has the "admin" role.
|
||||||
|
IsAdmin bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Authenticator validates MCIAS bearer tokens with a short-lived cache.
type Authenticator struct {
	httpClient  *http.Client     // shared client; TLS 1.3 + 10s timeout (see New)
	baseURL     string           // MCIAS base URL with any trailing slash removed
	serviceName string           // service identity included in login requests
	tags        []string         // tags included in login requests for policy evaluation
	logger      *slog.Logger     // may be nil; no logging of tokens in any case
	cache       *validationCache // 30-second (cacheTTL) token-validation cache
}
|
||||||
|
|
||||||
|
// New creates an Authenticator that talks to the MCIAS server described
|
||||||
|
// by cfg. TLS 1.3 is required for all HTTPS connections. If cfg.CACert
|
||||||
|
// is set, that CA certificate is added to the trust pool.
|
||||||
|
//
|
||||||
|
// For plain HTTP URLs (used in tests), TLS configuration is skipped.
|
||||||
|
func New(cfg Config, logger *slog.Logger) (*Authenticator, error) {
|
||||||
|
if cfg.ServerURL == "" {
|
||||||
|
return nil, fmt.Errorf("auth: server_url is required")
|
||||||
|
}
|
||||||
|
|
||||||
|
transport := &http.Transport{}
|
||||||
|
|
||||||
|
if !strings.HasPrefix(cfg.ServerURL, "http://") {
|
||||||
|
tlsCfg := &tls.Config{
|
||||||
|
MinVersion: tls.VersionTLS13,
|
||||||
|
}
|
||||||
|
|
||||||
|
if cfg.CACert != "" {
|
||||||
|
pem, err := os.ReadFile(cfg.CACert) //nolint:gosec // CA cert path from operator config
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("auth: read CA cert %s: %w", cfg.CACert, err)
|
||||||
|
}
|
||||||
|
pool := x509.NewCertPool()
|
||||||
|
if !pool.AppendCertsFromPEM(pem) {
|
||||||
|
return nil, fmt.Errorf("auth: no valid certificates in %s", cfg.CACert)
|
||||||
|
}
|
||||||
|
tlsCfg.RootCAs = pool
|
||||||
|
}
|
||||||
|
|
||||||
|
transport.TLSClientConfig = tlsCfg
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Authenticator{
|
||||||
|
httpClient: &http.Client{
|
||||||
|
Transport: transport,
|
||||||
|
Timeout: 10 * time.Second,
|
||||||
|
},
|
||||||
|
baseURL: strings.TrimRight(cfg.ServerURL, "/"),
|
||||||
|
serviceName: cfg.ServiceName,
|
||||||
|
tags: cfg.Tags,
|
||||||
|
logger: logger,
|
||||||
|
cache: newCache(cacheTTL),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Login authenticates a user against MCIAS and returns a bearer token.
// totpCode may be empty for accounts without TOTP configured.
//
// The service name and tags from Config are included in the login request
// so MCIAS can evaluate service-context login policy.
//
// Error mapping: transport failures return ErrUnavailable; HTTP 403
// returns ErrForbidden; every other non-200 status returns
// ErrInvalidCredentials.
func (a *Authenticator) Login(username, password, totpCode string) (token string, expiresAt time.Time, err error) {
	// Optional fields are only sent when set, so MCIAS never sees
	// empty values for them.
	reqBody := map[string]interface{}{
		"username": username,
		"password": password,
	}
	if totpCode != "" {
		reqBody["totp_code"] = totpCode
	}
	if a.serviceName != "" {
		reqBody["service_name"] = a.serviceName
	}
	if len(a.tags) > 0 {
		reqBody["tags"] = a.tags
	}

	var resp struct {
		Token     string `json:"token"`
		ExpiresAt string `json:"expires_at"`
	}
	status, err := a.doJSON(http.MethodPost, "/v1/auth/login", reqBody, &resp)
	if err != nil {
		// NOTE(review): the underlying transport error is discarded;
		// callers only ever see ErrUnavailable here.
		return "", time.Time{}, fmt.Errorf("auth: MCIAS login: %w", ErrUnavailable)
	}

	switch status {
	case http.StatusOK:
		// Parse the expiry time.
		exp, parseErr := time.Parse(time.RFC3339, resp.ExpiresAt)
		if parseErr != nil {
			exp = time.Now().Add(1 * time.Hour) // fallback
		}
		return resp.Token, exp, nil
	case http.StatusForbidden:
		return "", time.Time{}, ErrForbidden
	default:
		// Any other status (401, 400, ...) is treated as bad credentials.
		return "", time.Time{}, ErrInvalidCredentials
	}
}
|
||||||
|
|
||||||
|
// ValidateToken checks a bearer token against MCIAS. Results are cached
// by the SHA-256 hash of the token for 30 seconds.
//
// Returns ErrInvalidToken if the token is expired, revoked, or otherwise
// not valid, and ErrUnavailable if MCIAS cannot be reached.
func (a *Authenticator) ValidateToken(token string) (*TokenInfo, error) {
	// The cache is keyed by the token's SHA-256 so raw tokens are never
	// stored as map keys.
	h := sha256.Sum256([]byte(token))
	tokenHash := hex.EncodeToString(h[:])

	if info, ok := a.cache.get(tokenHash); ok {
		return info, nil
	}

	var resp struct {
		Valid       bool     `json:"valid"`
		Sub         string   `json:"sub"`
		Username    string   `json:"username"`
		AccountType string   `json:"account_type"`
		Roles       []string `json:"roles"`
	}
	status, err := a.doJSON(http.MethodPost, "/v1/token/validate",
		map[string]string{"token": token}, &resp)
	if err != nil {
		return nil, fmt.Errorf("auth: MCIAS validate: %w", ErrUnavailable)
	}

	// Any non-200, or an explicit valid=false, means the token is bad.
	if status != http.StatusOK || !resp.Valid {
		return nil, ErrInvalidToken
	}

	info := &TokenInfo{
		Username:    resp.Username,
		AccountType: resp.AccountType,
		Roles:       resp.Roles,
		IsAdmin:     hasRole(resp.Roles, "admin"),
	}
	// Fall back to the "sub" claim when no explicit username field is
	// present in the response.
	if info.Username == "" {
		info.Username = resp.Sub
	}

	a.cache.put(tokenHash, info)
	return info, nil
}
|
||||||
|
|
||||||
|
// ClearCache removes all cached token validation results. This should be
|
||||||
|
// called when the service transitions to a state where cached tokens may
|
||||||
|
// no longer be valid (e.g., Metacrypt sealing).
|
||||||
|
func (a *Authenticator) ClearCache() {
|
||||||
|
a.cache.clear()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Logout revokes a token on the MCIAS server.
|
||||||
|
func (a *Authenticator) Logout(token string) error {
|
||||||
|
req, err := http.NewRequestWithContext(context.Background(),
|
||||||
|
http.MethodPost, a.baseURL+"/v1/auth/logout", nil)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("auth: build logout request: %w", err)
|
||||||
|
}
|
||||||
|
req.Header.Set("Authorization", "Bearer "+token)
|
||||||
|
|
||||||
|
resp, err := a.httpClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("auth: MCIAS logout: %w", ErrUnavailable)
|
||||||
|
}
|
||||||
|
_ = resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// doJSON makes a JSON request to the MCIAS server and decodes the response.
|
||||||
|
// It returns the HTTP status code and any transport error.
|
||||||
|
func (a *Authenticator) doJSON(method, path string, body, out interface{}) (int, error) {
|
||||||
|
var reqBody io.Reader
|
||||||
|
if body != nil {
|
||||||
|
b, err := json.Marshal(body)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("marshal request: %w", err)
|
||||||
|
}
|
||||||
|
reqBody = bytes.NewReader(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := http.NewRequestWithContext(context.Background(),
|
||||||
|
method, a.baseURL+path, reqBody)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("build request: %w", err)
|
||||||
|
}
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
req.Header.Set("Accept", "application/json")
|
||||||
|
|
||||||
|
resp, err := a.httpClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
defer func() { _ = resp.Body.Close() }()
|
||||||
|
|
||||||
|
if out != nil && resp.StatusCode == http.StatusOK {
|
||||||
|
respBytes, readErr := io.ReadAll(resp.Body)
|
||||||
|
if readErr != nil {
|
||||||
|
return resp.StatusCode, fmt.Errorf("read response: %w", readErr)
|
||||||
|
}
|
||||||
|
if len(respBytes) > 0 {
|
||||||
|
if decErr := json.Unmarshal(respBytes, out); decErr != nil {
|
||||||
|
return resp.StatusCode, fmt.Errorf("decode response: %w", decErr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return resp.StatusCode, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// hasRole reports whether target appears in roles.
func hasRole(roles []string, target string) bool {
	for i := range roles {
		if roles[i] == target {
			return true
		}
	}
	return false
}
|
||||||
71
vendor/git.wntrmute.dev/mc/mcdsl/auth/cache.go
vendored
Normal file
71
vendor/git.wntrmute.dev/mc/mcdsl/auth/cache.go
vendored
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
package auth
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// cacheEntry holds a cached TokenInfo and its expiration time.
|
||||||
|
type cacheEntry struct {
|
||||||
|
info *TokenInfo
|
||||||
|
expiresAt time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// validationCache provides a concurrency-safe, TTL-based cache for token
|
||||||
|
// validation results. Tokens are keyed by their SHA-256 hex digest.
|
||||||
|
type validationCache struct {
|
||||||
|
mu sync.RWMutex
|
||||||
|
entries map[string]cacheEntry
|
||||||
|
ttl time.Duration
|
||||||
|
now func() time.Time // injectable clock for testing
|
||||||
|
}
|
||||||
|
|
||||||
|
// newCache creates a validationCache with the given TTL.
|
||||||
|
func newCache(ttl time.Duration) *validationCache {
|
||||||
|
return &validationCache{
|
||||||
|
entries: make(map[string]cacheEntry),
|
||||||
|
ttl: ttl,
|
||||||
|
now: time.Now,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// get returns cached TokenInfo for the given token hash, or false if
|
||||||
|
// the entry is missing or expired. Expired entries are lazily evicted.
|
||||||
|
func (c *validationCache) get(tokenHash string) (*TokenInfo, bool) {
|
||||||
|
c.mu.RLock()
|
||||||
|
entry, ok := c.entries[tokenHash]
|
||||||
|
c.mu.RUnlock()
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.now().After(entry.expiresAt) {
|
||||||
|
// Lazy evict the expired entry.
|
||||||
|
c.mu.Lock()
|
||||||
|
if e, exists := c.entries[tokenHash]; exists && c.now().After(e.expiresAt) {
|
||||||
|
delete(c.entries, tokenHash)
|
||||||
|
}
|
||||||
|
c.mu.Unlock()
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
return entry.info, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// clear removes all entries from the cache.
|
||||||
|
func (c *validationCache) clear() {
|
||||||
|
c.mu.Lock()
|
||||||
|
c.entries = make(map[string]cacheEntry)
|
||||||
|
c.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// put stores TokenInfo in the cache with an expiration of now + TTL.
|
||||||
|
func (c *validationCache) put(tokenHash string, info *TokenInfo) {
|
||||||
|
c.mu.Lock()
|
||||||
|
c.entries[tokenHash] = cacheEntry{
|
||||||
|
info: info,
|
||||||
|
expiresAt: c.now().Add(c.ttl),
|
||||||
|
}
|
||||||
|
c.mu.Unlock()
|
||||||
|
}
|
||||||
19
vendor/git.wntrmute.dev/mc/mcdsl/auth/context.go
vendored
Normal file
19
vendor/git.wntrmute.dev/mc/mcdsl/auth/context.go
vendored
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
package auth
|
||||||
|
|
||||||
|
import "context"
|
||||||
|
|
||||||
|
// contextKey is an unexported type used as the context key for TokenInfo,
|
||||||
|
// preventing collisions with keys from other packages.
|
||||||
|
type contextKey struct{}
|
||||||
|
|
||||||
|
// ContextWithTokenInfo returns a new context carrying the given TokenInfo.
|
||||||
|
func ContextWithTokenInfo(ctx context.Context, info *TokenInfo) context.Context {
|
||||||
|
return context.WithValue(ctx, contextKey{}, info)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TokenInfoFromContext extracts TokenInfo from the context. It returns nil
|
||||||
|
// if no TokenInfo is present.
|
||||||
|
func TokenInfoFromContext(ctx context.Context) *TokenInfo {
|
||||||
|
info, _ := ctx.Value(contextKey{}).(*TokenInfo)
|
||||||
|
return info
|
||||||
|
}
|
||||||
373
vendor/git.wntrmute.dev/mc/mcdsl/config/config.go
vendored
Normal file
373
vendor/git.wntrmute.dev/mc/mcdsl/config/config.go
vendored
Normal file
@@ -0,0 +1,373 @@
|
|||||||
|
// Package config provides TOML configuration loading with environment
|
||||||
|
// variable overrides for Metacircular services.
|
||||||
|
//
|
||||||
|
// Services define their own config struct embedding [Base], which provides
|
||||||
|
// the standard sections (Server, Database, MCIAS, Log). Use [Load] to
|
||||||
|
// parse a TOML file, apply environment overrides, set defaults, and
|
||||||
|
// validate required fields.
|
||||||
|
//
|
||||||
|
// # Duration fields
|
||||||
|
//
|
||||||
|
// Timeout fields in [ServerConfig] use the [Duration] type rather than
|
||||||
|
// [time.Duration] because go-toml v2 does not natively decode strings
|
||||||
|
// (e.g., "30s") into time.Duration. Access the underlying value via
|
||||||
|
// the embedded field:
|
||||||
|
//
|
||||||
|
// cfg.Server.ReadTimeout.Duration // time.Duration
|
||||||
|
//
|
||||||
|
// In TOML files, durations are written as Go duration strings:
|
||||||
|
//
|
||||||
|
// read_timeout = "30s"
|
||||||
|
// idle_timeout = "2m"
|
||||||
|
//
|
||||||
|
// Environment variable overrides also use this format:
|
||||||
|
//
|
||||||
|
// MCR_SERVER_READ_TIMEOUT=30s
|
||||||
|
package config
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/pelletier/go-toml/v2"
|
||||||
|
|
||||||
|
"git.wntrmute.dev/mc/mcdsl/auth"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Base contains the configuration sections common to all Metacircular
|
||||||
|
// services. Services embed this in their own config struct and add
|
||||||
|
// service-specific sections.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// type MyConfig struct {
|
||||||
|
// config.Base
|
||||||
|
// MyService MyServiceSection `toml:"my_service"`
|
||||||
|
// }
|
||||||
|
type Base struct {
|
||||||
|
Server ServerConfig `toml:"server"`
|
||||||
|
Database DatabaseConfig `toml:"database"`
|
||||||
|
MCIAS auth.Config `toml:"mcias"`
|
||||||
|
Log LogConfig `toml:"log"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerConfig holds TLS server settings.
|
||||||
|
type ServerConfig struct {
|
||||||
|
// ListenAddr is the HTTPS listen address (e.g., ":8443"). Required.
|
||||||
|
ListenAddr string `toml:"listen_addr"`
|
||||||
|
|
||||||
|
// GRPCAddr is the gRPC listen address (e.g., ":9443"). Optional;
|
||||||
|
// gRPC is disabled if empty.
|
||||||
|
GRPCAddr string `toml:"grpc_addr"`
|
||||||
|
|
||||||
|
// TLSCert is the path to the TLS certificate file (PEM). Required.
|
||||||
|
TLSCert string `toml:"tls_cert"`
|
||||||
|
|
||||||
|
// TLSKey is the path to the TLS private key file (PEM). Required.
|
||||||
|
TLSKey string `toml:"tls_key"`
|
||||||
|
|
||||||
|
// ReadTimeout is the maximum duration for reading the entire request.
|
||||||
|
// Defaults to 30s.
|
||||||
|
ReadTimeout Duration `toml:"read_timeout"`
|
||||||
|
|
||||||
|
// WriteTimeout is the maximum duration before timing out writes.
|
||||||
|
// Defaults to 30s.
|
||||||
|
WriteTimeout Duration `toml:"write_timeout"`
|
||||||
|
|
||||||
|
// IdleTimeout is the maximum time to wait for the next request on
|
||||||
|
// a keep-alive connection. Defaults to 120s.
|
||||||
|
IdleTimeout Duration `toml:"idle_timeout"`
|
||||||
|
|
||||||
|
// ShutdownTimeout is the maximum time to wait for in-flight requests
|
||||||
|
// to drain during graceful shutdown. Defaults to 60s.
|
||||||
|
ShutdownTimeout Duration `toml:"shutdown_timeout"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// DatabaseConfig holds SQLite database settings.
|
||||||
|
type DatabaseConfig struct {
|
||||||
|
// Path is the path to the SQLite database file. Required.
|
||||||
|
Path string `toml:"path"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// LogConfig holds logging settings.
|
||||||
|
type LogConfig struct {
|
||||||
|
// Level is the log level (debug, info, warn, error). Defaults to "info".
|
||||||
|
Level string `toml:"level"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// WebConfig holds web UI server settings. This is not part of Base because
|
||||||
|
// not all services have a web UI — services that do can add it to their
|
||||||
|
// own config struct.
|
||||||
|
type WebConfig struct {
|
||||||
|
// ListenAddr is the web UI listen address (e.g., "127.0.0.1:8080").
|
||||||
|
ListenAddr string `toml:"listen_addr"`
|
||||||
|
|
||||||
|
// GRPCAddr is the gRPC address of the API server that the web UI
|
||||||
|
// connects to.
|
||||||
|
GRPCAddr string `toml:"grpc_addr"`
|
||||||
|
|
||||||
|
// CACert is an optional CA certificate for verifying the API server's
|
||||||
|
// TLS certificate.
|
||||||
|
CACert string `toml:"ca_cert"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validator is an optional interface that config structs can implement
|
||||||
|
// to add service-specific validation. If the config type implements
|
||||||
|
// Validator, its Validate method is called after defaults and env
|
||||||
|
// overrides are applied.
|
||||||
|
type Validator interface {
|
||||||
|
Validate() error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load reads a TOML config file at path, applies environment variable
|
||||||
|
// overrides using envPrefix (e.g., "MCR" maps MCR_SERVER_LISTEN_ADDR to
|
||||||
|
// Server.ListenAddr), sets defaults for unset optional fields, and
|
||||||
|
// validates required fields.
|
||||||
|
//
|
||||||
|
// If T implements [Validator], its Validate method is called after all
|
||||||
|
// other processing.
|
||||||
|
func Load[T any](path string, envPrefix string) (*T, error) {
|
||||||
|
data, err := os.ReadFile(path) //nolint:gosec // config path is operator-supplied
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("config: read %s: %w", path, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var cfg T
|
||||||
|
if err := toml.Unmarshal(data, &cfg); err != nil {
|
||||||
|
return nil, fmt.Errorf("config: parse %s: %w", path, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if envPrefix != "" {
|
||||||
|
applyEnvToStruct(reflect.ValueOf(&cfg).Elem(), envPrefix)
|
||||||
|
}
|
||||||
|
|
||||||
|
applyPortEnv(&cfg)
|
||||||
|
|
||||||
|
applyBaseDefaults(&cfg)
|
||||||
|
|
||||||
|
if err := validateBase(&cfg); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := any(&cfg).(Validator); ok {
|
||||||
|
if err := v.Validate(); err != nil {
|
||||||
|
return nil, fmt.Errorf("config: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &cfg, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// applyBaseDefaults sets defaults on the embedded Base struct if present.
|
||||||
|
func applyBaseDefaults(cfg any) {
|
||||||
|
base := findBase(cfg)
|
||||||
|
if base == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if base.Server.ReadTimeout.Duration == 0 {
|
||||||
|
base.Server.ReadTimeout.Duration = 30 * time.Second
|
||||||
|
}
|
||||||
|
if base.Server.WriteTimeout.Duration == 0 {
|
||||||
|
base.Server.WriteTimeout.Duration = 30 * time.Second
|
||||||
|
}
|
||||||
|
if base.Server.IdleTimeout.Duration == 0 {
|
||||||
|
base.Server.IdleTimeout.Duration = 120 * time.Second
|
||||||
|
}
|
||||||
|
if base.Server.ShutdownTimeout.Duration == 0 {
|
||||||
|
base.Server.ShutdownTimeout.Duration = 60 * time.Second
|
||||||
|
}
|
||||||
|
if base.Log.Level == "" {
|
||||||
|
base.Log.Level = "info"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// validateBase checks required fields on the embedded Base struct if present.
|
||||||
|
func validateBase(cfg any) error {
|
||||||
|
base := findBase(cfg)
|
||||||
|
if base == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
required := []struct {
|
||||||
|
name string
|
||||||
|
value string
|
||||||
|
}{
|
||||||
|
{"server.listen_addr", base.Server.ListenAddr},
|
||||||
|
{"server.tls_cert", base.Server.TLSCert},
|
||||||
|
{"server.tls_key", base.Server.TLSKey},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, r := range required {
|
||||||
|
if r.value == "" {
|
||||||
|
return fmt.Errorf("config: required field %q is missing", r.name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// findBase returns a pointer to the embedded Base struct, or nil if the
|
||||||
|
// config type does not embed Base.
|
||||||
|
func findBase(cfg any) *Base {
|
||||||
|
v := reflect.ValueOf(cfg)
|
||||||
|
if v.Kind() == reflect.Ptr {
|
||||||
|
v = v.Elem()
|
||||||
|
}
|
||||||
|
if v.Kind() != reflect.Struct {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if cfg *is* a Base.
|
||||||
|
if b, ok := v.Addr().Interface().(*Base); ok {
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check embedded fields.
|
||||||
|
t := v.Type()
|
||||||
|
for i := range t.NumField() {
|
||||||
|
field := t.Field(i)
|
||||||
|
if field.Anonymous && field.Type == reflect.TypeOf(Base{}) {
|
||||||
|
b, ok := v.Field(i).Addr().Interface().(*Base)
|
||||||
|
if ok {
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// applyPortEnv overrides ServerConfig.ListenAddr and ServerConfig.GRPCAddr
|
||||||
|
// from $PORT and $PORT_GRPC respectively. These environment variables are
|
||||||
|
// set by the MCP agent to assign authoritative port bindings, so they take
|
||||||
|
// precedence over both TOML values and generic env overrides.
|
||||||
|
func applyPortEnv(cfg any) {
|
||||||
|
sc := findServerConfig(cfg)
|
||||||
|
if sc == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if port, ok := os.LookupEnv("PORT"); ok {
|
||||||
|
sc.ListenAddr = ":" + port
|
||||||
|
}
|
||||||
|
if port, ok := os.LookupEnv("PORT_GRPC"); ok {
|
||||||
|
sc.GRPCAddr = ":" + port
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// findServerConfig returns a pointer to the ServerConfig in the config
|
||||||
|
// struct. It first checks for an embedded Base (which contains Server),
|
||||||
|
// then walks the struct tree via reflection to find any ServerConfig field
|
||||||
|
// directly (e.g., the Metacrypt pattern where ServerConfig is embedded
|
||||||
|
// without Base).
|
||||||
|
func findServerConfig(cfg any) *ServerConfig {
|
||||||
|
if base := findBase(cfg); base != nil {
|
||||||
|
return &base.Server
|
||||||
|
}
|
||||||
|
|
||||||
|
return findServerConfigReflect(reflect.ValueOf(cfg))
|
||||||
|
}
|
||||||
|
|
||||||
|
// findServerConfigReflect walks the struct tree to find a ServerConfig field.
|
||||||
|
func findServerConfigReflect(v reflect.Value) *ServerConfig {
|
||||||
|
if v.Kind() == reflect.Ptr {
|
||||||
|
v = v.Elem()
|
||||||
|
}
|
||||||
|
if v.Kind() != reflect.Struct {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
scType := reflect.TypeOf(ServerConfig{})
|
||||||
|
t := v.Type()
|
||||||
|
for i := range t.NumField() {
|
||||||
|
field := t.Field(i)
|
||||||
|
fv := v.Field(i)
|
||||||
|
|
||||||
|
if field.Type == scType {
|
||||||
|
sc, ok := fv.Addr().Interface().(*ServerConfig)
|
||||||
|
if ok {
|
||||||
|
return sc
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Recurse into embedded or nested structs.
|
||||||
|
if fv.Kind() == reflect.Struct && field.Type != scType {
|
||||||
|
if sc := findServerConfigReflect(fv); sc != nil {
|
||||||
|
return sc
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// applyEnvToStruct recursively walks a struct and overrides field values
|
||||||
|
// from environment variables. The env variable name is built from the
|
||||||
|
// prefix and the toml tag: PREFIX_SECTION_FIELD (uppercased).
|
||||||
|
//
|
||||||
|
// Supported field types: string, time.Duration (as int64), []string
|
||||||
|
// (comma-separated), bool, int.
|
||||||
|
func applyEnvToStruct(v reflect.Value, prefix string) {
|
||||||
|
if v.Kind() == reflect.Ptr {
|
||||||
|
v = v.Elem()
|
||||||
|
}
|
||||||
|
t := v.Type()
|
||||||
|
|
||||||
|
for i := range t.NumField() {
|
||||||
|
field := t.Field(i)
|
||||||
|
fv := v.Field(i)
|
||||||
|
|
||||||
|
// For anonymous (embedded) fields, recurse with the same prefix.
|
||||||
|
if field.Anonymous {
|
||||||
|
applyEnvToStruct(fv, prefix)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
tag := field.Tag.Get("toml")
|
||||||
|
if tag == "" || tag == "-" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
envKey := prefix + "_" + strings.ToUpper(tag)
|
||||||
|
|
||||||
|
// Handle Duration wrapper before generic struct recursion.
|
||||||
|
if field.Type == reflect.TypeOf(Duration{}) {
|
||||||
|
envVal, ok := os.LookupEnv(envKey)
|
||||||
|
if ok {
|
||||||
|
d, parseErr := time.ParseDuration(envVal)
|
||||||
|
if parseErr == nil {
|
||||||
|
fv.Set(reflect.ValueOf(Duration{d}))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if field.Type.Kind() == reflect.Struct {
|
||||||
|
applyEnvToStruct(fv, envKey)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
envVal, ok := os.LookupEnv(envKey)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch fv.Kind() {
|
||||||
|
case reflect.String:
|
||||||
|
fv.SetString(envVal)
|
||||||
|
case reflect.Bool:
|
||||||
|
fv.SetBool(envVal == "true" || envVal == "1")
|
||||||
|
case reflect.Slice:
|
||||||
|
if field.Type.Elem().Kind() == reflect.String {
|
||||||
|
parts := strings.Split(envVal, ",")
|
||||||
|
for j := range parts {
|
||||||
|
parts[j] = strings.TrimSpace(parts[j])
|
||||||
|
}
|
||||||
|
fv.Set(reflect.ValueOf(parts))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
37
vendor/git.wntrmute.dev/mc/mcdsl/config/duration.go
vendored
Normal file
37
vendor/git.wntrmute.dev/mc/mcdsl/config/duration.go
vendored
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
package config
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Duration wraps [time.Duration] so TOML string values ("30s", "5m") can
// be decoded; go-toml v2 does not decode strings into time.Duration
// natively, so this type implements [encoding.TextUnmarshaler].
//
// The underlying time.Duration is reachable through the embedded field:
//
//	cfg.Server.ReadTimeout.Duration // time.Duration value
//
// and, thanks to the embedding, works anywhere a time.Duration is
// expected:
//
//	time.After(cfg.Server.ReadTimeout.Duration)
type Duration struct {
	time.Duration
}

// UnmarshalText parses a Go duration string, satisfying
// encoding.TextUnmarshaler for TOML string decoding.
func (d *Duration) UnmarshalText(text []byte) error {
	parsed, err := time.ParseDuration(string(text))
	if err != nil {
		return fmt.Errorf("invalid duration %q: %w", string(text), err)
	}
	d.Duration = parsed
	return nil
}

// MarshalText renders the duration as a Go duration string, satisfying
// encoding.TextMarshaler for TOML string encoding.
func (d Duration) MarshalText() ([]byte, error) {
	return []byte(d.String()), nil
}
|
||||||
144
vendor/git.wntrmute.dev/mc/mcdsl/csrf/csrf.go
vendored
Normal file
144
vendor/git.wntrmute.dev/mc/mcdsl/csrf/csrf.go
vendored
Normal file
@@ -0,0 +1,144 @@
|
|||||||
|
// Package csrf provides HMAC-SHA256 double-submit cookie CSRF protection
|
||||||
|
// for Metacircular web UIs.
|
||||||
|
//
|
||||||
|
// The token format is base64(nonce) + "." + base64(HMAC-SHA256(secret, nonce)).
|
||||||
|
// A fresh token is set as a cookie on each page load. Mutating requests
|
||||||
|
// (POST, PUT, PATCH, DELETE) must include the token as a form field that
|
||||||
|
// matches the cookie value. Both the match and the HMAC signature are
|
||||||
|
// verified.
|
||||||
|
package csrf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/hmac"
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/base64"
|
||||||
|
"fmt"
|
||||||
|
"html/template"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Protect implements HMAC-signed double-submit-cookie CSRF protection:
// token generation, validation, and enforcement middleware.
type Protect struct {
	secret     [32]byte
	cookieName string
	fieldName  string
}

// New builds a Protect from secret (32 bytes from crypto/rand, unique
// per service instance), the cookie name, and the form field name.
//
// Typical usage:
//
//	secret := make([]byte, 32)
//	crypto_rand.Read(secret)
//	csrf := csrf.New(secret, "myservice_csrf", "csrf_token")
func New(secret []byte, cookieName, fieldName string) *Protect {
	p := &Protect{
		cookieName: cookieName,
		fieldName:  fieldName,
	}
	copy(p.secret[:], secret)
	return p
}

// Middleware enforces CSRF validation on mutating requests (POST, PUT,
// PATCH, DELETE); GET, HEAD, and OPTIONS pass straight through. A
// missing, mismatched, or badly-signed token yields 403 Forbidden.
func (p *Protect) Middleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.Method {
		case http.MethodGet, http.MethodHead, http.MethodOptions:
			next.ServeHTTP(w, r)
			return
		}

		formToken := r.FormValue(p.fieldName) //nolint:gosec // form size is bounded by the http.Server's MaxBytesReader or ReadTimeout
		cookie, err := r.Cookie(p.cookieName)

		// Short-circuit evaluation keeps cookie.Value safe: it is only
		// read when err is nil.
		reject := err != nil || cookie.Value == "" || formToken == "" ||
			formToken != cookie.Value || !p.validateToken(formToken)
		if reject {
			http.Error(w, "forbidden", http.StatusForbidden)
			return
		}

		next.ServeHTTP(w, r)
	})
}

// SetToken mints a fresh CSRF token, attaches it to the response as a
// cookie, and returns it. Call when rendering pages containing forms.
func (p *Protect) SetToken(w http.ResponseWriter) string {
	token := p.generateToken()
	http.SetCookie(w, &http.Cookie{
		Name:     p.cookieName,
		Value:    token,
		Path:     "/",
		HttpOnly: true,
		Secure:   true,
		SameSite: http.SameSiteStrictMode,
	})
	return token
}

// TemplateFunc returns a [template.FuncMap] exposing "csrfField", which
// renders a hidden input carrying the CSRF token. Calling it sets the
// token cookie via SetToken. Use during template rendering:
//
//	tmpl.Funcs(csrf.TemplateFunc(w))
//
// and inside templates:
//
//	<form method="POST">
//	{{ csrfField }}
//	...
//	</form>
func (p *Protect) TemplateFunc(w http.ResponseWriter) template.FuncMap {
	token := p.SetToken(w)
	field := func() template.HTML {
		return template.HTML(fmt.Sprintf( //nolint:gosec // output is escaped field name + validated token
			`<input type="hidden" name="%s" value="%s">`,
			template.HTMLEscapeString(p.fieldName),
			template.HTMLEscapeString(token),
		))
	}
	return template.FuncMap{"csrfField": field}
}

// generateToken returns base64(nonce) + "." + base64(HMAC-SHA256(secret, nonce)).
func (p *Protect) generateToken() string {
	nonce := make([]byte, 32)
	if _, err := rand.Read(nonce); err != nil {
		panic("csrf: failed to read random bytes: " + err.Error())
	}
	return base64.StdEncoding.EncodeToString(nonce) + "." +
		base64.StdEncoding.EncodeToString(p.sign(nonce))
}

// validateToken reports whether token splits into nonce.signature parts
// whose signature is a valid HMAC over the nonce.
func (p *Protect) validateToken(token string) bool {
	encNonce, encSig, found := strings.Cut(token, ".")
	if !found {
		return false
	}
	nonce, err := base64.StdEncoding.DecodeString(encNonce)
	if err != nil {
		return false
	}
	sig, err := base64.StdEncoding.DecodeString(encSig)
	if err != nil {
		return false
	}
	return hmac.Equal(sig, p.sign(nonce))
}

// sign computes HMAC-SHA256 over msg with the instance secret.
func (p *Protect) sign(msg []byte) []byte {
	mac := hmac.New(sha256.New, p.secret[:])
	mac.Write(msg)
	return mac.Sum(nil)
}
|
||||||
187
vendor/git.wntrmute.dev/mc/mcdsl/db/db.go
vendored
Normal file
187
vendor/git.wntrmute.dev/mc/mcdsl/db/db.go
vendored
Normal file
@@ -0,0 +1,187 @@
|
|||||||
|
// Package db provides SQLite database setup, migrations, and snapshots
|
||||||
|
// for Metacircular services.
|
||||||
|
//
|
||||||
|
// All databases are opened with the standard Metacircular pragmas (WAL mode,
|
||||||
|
// foreign keys, busy timeout) and restrictive file permissions (0600).
|
||||||
|
package db
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
_ "modernc.org/sqlite" // SQLite driver (pure Go, no CGo).
|
||||||
|
)
|
||||||
|
|
||||||
|
// Open opens (creating if necessary) the SQLite database at path with the
// standard Metacircular pragmas:
//
//	PRAGMA journal_mode = WAL;
//	PRAGMA foreign_keys = ON;
//	PRAGMA busy_timeout = 5000;
//
// The database file is kept at 0600 (owner read/write only) and its
// parent directory is created when missing.
//
// Open returns a plain [*sql.DB] — no wrapper types — for direct use
// with database/sql.
func Open(path string) (*sql.DB, error) {
	dir := filepath.Dir(path)
	if err := os.MkdirAll(dir, 0700); err != nil {
		return nil, fmt.Errorf("db: create directory %s: %w", dir, err)
	}

	// Create the file up front with restrictive permissions so it never
	// exists with a looser mode, even briefly.
	if _, err := os.Stat(path); os.IsNotExist(err) {
		f, createErr := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0600) //nolint:gosec // path is caller-provided config, not user input
		if createErr != nil {
			return nil, fmt.Errorf("db: create file %s: %w", path, createErr)
		}
		_ = f.Close()
	}

	database, err := sql.Open("sqlite", path)
	if err != nil {
		return nil, fmt.Errorf("db: open %s: %w", path, err)
	}

	for _, pragma := range []string{
		"PRAGMA journal_mode = WAL",
		"PRAGMA foreign_keys = ON",
		"PRAGMA busy_timeout = 5000",
	} {
		if _, execErr := database.Exec(pragma); execErr != nil {
			_ = database.Close()
			return nil, fmt.Errorf("db: %s: %w", pragma, execErr)
		}
	}

	// SQLite allows many readers but only one writer. Even in WAL mode,
	// several Go connections competing for the write lock produce
	// SQLITE_BUSY under concurrent load; one connection serializes all
	// access and eliminates those errors.
	database.SetMaxOpenConns(1)

	// Tighten permissions even when the file pre-dated this call.
	if err := os.Chmod(path, 0600); err != nil {
		_ = database.Close()
		return nil, fmt.Errorf("db: chmod %s: %w", path, err)
	}

	return database, nil
}
|
||||||
|
|
||||||
|
// Migration is a numbered, named schema change. Services define their
|
||||||
|
// migrations as a []Migration slice — the slice is the schema history.
|
||||||
|
type Migration struct {
|
||||||
|
// Version is the migration number. Must be unique and should be
|
||||||
|
// sequential starting from 1.
|
||||||
|
Version int
|
||||||
|
|
||||||
|
// Name is a short human-readable description (e.g., "initial schema").
|
||||||
|
Name string
|
||||||
|
|
||||||
|
// SQL is the DDL/DML to execute. Multiple statements are allowed
|
||||||
|
// (separated by semicolons). Each migration runs in a transaction.
|
||||||
|
SQL string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Migrate applies all pending migrations from the given slice. It creates
// the schema_migrations tracking table if it does not exist.
//
// Each migration runs in its own transaction. Already-applied migrations
// (identified by version number) are skipped. Timestamps are stored as
// RFC 3339 UTC.
func Migrate(database *sql.DB, migrations []Migration) error {
	// Create the tracking table unconditionally so Migrate works on a
	// fresh database as well as an existing one.
	_, err := database.Exec(`CREATE TABLE IF NOT EXISTS schema_migrations (
	version INTEGER PRIMARY KEY,
	name TEXT NOT NULL DEFAULT '',
	applied_at TEXT NOT NULL DEFAULT ''
	)`)
	if err != nil {
		return fmt.Errorf("db: create schema_migrations: %w", err)
	}

	for _, m := range migrations {
		applied, checkErr := migrationApplied(database, m.Version)
		if checkErr != nil {
			return checkErr
		}
		if applied {
			continue
		}

		// Each migration gets its own transaction: a failure leaves
		// earlier migrations committed and the failing one rolled back.
		tx, txErr := database.Begin()
		if txErr != nil {
			return fmt.Errorf("db: begin migration %d (%s): %w", m.Version, m.Name, txErr)
		}

		if _, execErr := tx.Exec(m.SQL); execErr != nil {
			_ = tx.Rollback()
			return fmt.Errorf("db: migration %d (%s): %w", m.Version, m.Name, execErr)
		}

		// Record the migration inside the same transaction so the schema
		// change and its bookkeeping commit (or roll back) together.
		now := time.Now().UTC().Format(time.RFC3339)
		if _, execErr := tx.Exec(
			`INSERT INTO schema_migrations (version, name, applied_at) VALUES (?, ?, ?)`,
			m.Version, m.Name, now,
		); execErr != nil {
			_ = tx.Rollback()
			return fmt.Errorf("db: record migration %d: %w", m.Version, execErr)
		}

		if commitErr := tx.Commit(); commitErr != nil {
			return fmt.Errorf("db: commit migration %d: %w", m.Version, commitErr)
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// SchemaVersion returns the highest applied migration version, or 0 if
// no migrations have been applied.
func SchemaVersion(database *sql.DB) (int, error) {
	// MAX(version) yields SQL NULL on an empty table, so scan into a
	// NullInt64 rather than a plain int.
	var highest sql.NullInt64
	if err := database.QueryRow(`SELECT MAX(version) FROM schema_migrations`).Scan(&highest); err != nil {
		return 0, fmt.Errorf("db: schema version: %w", err)
	}
	if highest.Valid {
		return int(highest.Int64), nil
	}
	return 0, nil
}
|
||||||
|
|
||||||
|
// Snapshot creates a consistent backup of the database at destPath using
// SQLite's VACUUM INTO. The destination file is created with 0600
// permissions.
func Snapshot(database *sql.DB, destPath string) error {
	// VACUUM INTO does not create intermediate directories; do it here,
	// owner-only.
	dir := filepath.Dir(destPath)
	if err := os.MkdirAll(dir, 0700); err != nil {
		return fmt.Errorf("db: create snapshot directory %s: %w", dir, err)
	}

	// The destination path is bound as a parameter, not interpolated.
	if _, err := database.Exec("VACUUM INTO ?", destPath); err != nil {
		return fmt.Errorf("db: snapshot: %w", err)
	}

	// Tighten permissions to owner-only regardless of how the file was
	// created.
	if err := os.Chmod(destPath, 0600); err != nil {
		return fmt.Errorf("db: chmod snapshot %s: %w", destPath, err)
	}

	return nil
}
|
||||||
|
|
||||||
|
// migrationApplied reports whether the migration with the given version
// number has already been recorded in schema_migrations.
func migrationApplied(database *sql.DB, version int) (bool, error) {
	row := database.QueryRow(
		`SELECT COUNT(*) FROM schema_migrations WHERE version = ?`, version,
	)

	var n int
	if err := row.Scan(&n); err != nil {
		return false, fmt.Errorf("db: check migration %d: %w", version, err)
	}
	return n > 0, nil
}
|
||||||
216
vendor/git.wntrmute.dev/mc/mcdsl/grpcserver/server.go
vendored
Normal file
216
vendor/git.wntrmute.dev/mc/mcdsl/grpcserver/server.go
vendored
Normal file
@@ -0,0 +1,216 @@
|
|||||||
|
// Package grpcserver provides gRPC server setup with TLS, interceptor
|
||||||
|
// chain, and method-map authentication for Metacircular services.
|
||||||
|
//
|
||||||
|
// Access control is enforced via a [MethodMap] that classifies each RPC
|
||||||
|
// as public, auth-required, or admin-required. Methods not listed in any
|
||||||
|
// map are denied by default — forgetting to register a new RPC results
|
||||||
|
// in a denied request, not an open one.
|
||||||
|
package grpcserver
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/tls"
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
"net"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/credentials"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
|
||||||
|
"git.wntrmute.dev/mc/mcdsl/auth"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MethodMap classifies gRPC methods for access control.
//
// The auth interceptor consults the maps in order: Public, then
// AdminRequired, then AuthRequired. A method present in none of the maps
// is denied by default.
type MethodMap struct {
	// Public methods require no authentication.
	Public map[string]bool

	// AuthRequired methods require a valid MCIAS bearer token.
	AuthRequired map[string]bool

	// AdminRequired methods require a valid token with the admin role.
	AdminRequired map[string]bool
}

// Server wraps a grpc.Server with Metacircular auth interceptors.
type Server struct {
	// GRPCServer is the underlying grpc.Server. Services register their
	// implementations on it before calling Serve.
	GRPCServer *grpc.Server

	// Logger is used by the logging interceptor.
	Logger *slog.Logger

	// listener is set by Serve once the TCP listener is open.
	listener net.Listener
}

// Options configures optional behavior for the gRPC server.
type Options struct {
	// PreInterceptors run before the logging and auth interceptors.
	// Use for lifecycle gates like seal checks that should reject
	// requests before any auth validation occurs.
	PreInterceptors []grpc.UnaryServerInterceptor

	// PostInterceptors run after auth but before the handler.
	// Use for audit logging, rate limiting, or other cross-cutting
	// concerns that need access to the authenticated identity.
	PostInterceptors []grpc.UnaryServerInterceptor
}
|
||||||
|
|
||||||
|
// New creates a gRPC server with TLS (if certFile and keyFile are
// non-empty) and an interceptor chain:
//
// [pre-interceptors] → logging → auth → [post-interceptors] → handler
//
// The auth interceptor uses methods to determine the access level for
// each RPC. Methods not in any map are denied by default.
//
// If certFile and keyFile are empty, TLS is skipped (for testing).
// opts is optional; pass nil for the default chain (logging + auth only).
func New(certFile, keyFile string, authenticator *auth.Authenticator, methods MethodMap, logger *slog.Logger, opts *Options) (*Server, error) {
	// Assemble the chain in the documented, fixed order.
	var interceptors []grpc.UnaryServerInterceptor
	if opts != nil {
		interceptors = append(interceptors, opts.PreInterceptors...)
	}
	interceptors = append(interceptors,
		loggingInterceptor(logger),
		authInterceptor(authenticator, methods),
	)
	if opts != nil {
		interceptors = append(interceptors, opts.PostInterceptors...)
	}
	chain := grpc.ChainUnaryInterceptor(interceptors...)

	var serverOpts []grpc.ServerOption
	serverOpts = append(serverOpts, chain)

	// TLS is optional so tests can run without certificates; when
	// enabled, TLS 1.3 is the minimum.
	if certFile != "" && keyFile != "" {
		cert, err := tls.LoadX509KeyPair(certFile, keyFile)
		if err != nil {
			return nil, fmt.Errorf("grpcserver: load TLS cert: %w", err)
		}
		tlsCfg := &tls.Config{
			Certificates: []tls.Certificate{cert},
			MinVersion:   tls.VersionTLS13,
		}
		serverOpts = append(serverOpts, grpc.Creds(credentials.NewTLS(tlsCfg)))
	}

	return &Server{
		GRPCServer: grpc.NewServer(serverOpts...),
		Logger:     logger,
	}, nil
}
|
||||||
|
|
||||||
|
// Serve starts the gRPC server on the given address. It blocks until
// the server is stopped.
func (s *Server) Serve(addr string) error {
	lis, err := net.Listen("tcp", addr)
	if err != nil {
		return fmt.Errorf("grpcserver: listen %s: %w", addr, err)
	}
	// Record the active listener on the server before handing it off.
	s.listener = lis
	s.Logger.Info("starting gRPC server", "addr", addr)
	return s.GRPCServer.Serve(lis)
}

// Stop gracefully stops the gRPC server, waiting for in-flight RPCs
// to complete.
func (s *Server) Stop() {
	s.GRPCServer.GracefulStop()
}
|
||||||
|
|
||||||
|
// TokenInfoFromContext extracts [auth.TokenInfo] from a gRPC request
// context. Returns nil if no token info is present (e.g., for public
// methods).
func TokenInfoFromContext(ctx context.Context) *auth.TokenInfo {
	return auth.TokenInfoFromContext(ctx)
}

// loggingInterceptor logs each RPC after it completes.
func loggingInterceptor(logger *slog.Logger) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
		start := time.Now()
		resp, err := handler(ctx, req)
		// status.Code maps a nil error to codes.OK, so every RPC gets a
		// code in the log line.
		code := status.Code(err)
		logger.Info("grpc",
			"method", info.FullMethod,
			"code", code.String(),
			"duration", time.Since(start),
		)
		return resp, err
	}
}
|
||||||
|
|
||||||
|
// authInterceptor enforces access control based on the MethodMap.
//
// Evaluation order:
// 1. Public → pass through, no auth.
// 2. AdminRequired → validate token, require IsAdmin.
// 3. AuthRequired → validate token.
// 4. Not in any map → deny (default deny).
func authInterceptor(authenticator *auth.Authenticator, methods MethodMap) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
		method := info.FullMethod

		// Public methods: no auth. Note that no TokenInfo is attached
		// to the context for these, even if the caller sent a token.
		if methods.Public[method] {
			return handler(ctx, req)
		}

		// All other methods require a valid token.
		tokenInfo, err := extractAndValidate(ctx, authenticator)
		if err != nil {
			return nil, err
		}

		// Admin-required methods: check admin role.
		if methods.AdminRequired[method] {
			if !tokenInfo.IsAdmin {
				return nil, status.Errorf(codes.PermissionDenied, "admin role required")
			}
			// Attach the identity so handlers can recover it via
			// TokenInfoFromContext.
			ctx = auth.ContextWithTokenInfo(ctx, tokenInfo)
			return handler(ctx, req)
		}

		// Auth-required methods: token is sufficient.
		if methods.AuthRequired[method] {
			ctx = auth.ContextWithTokenInfo(ctx, tokenInfo)
			return handler(ctx, req)
		}

		// Default deny: method not in any map.
		return nil, status.Errorf(codes.PermissionDenied, "method not authorized")
	}
}
|
||||||
|
|
||||||
|
// extractAndValidate extracts the bearer token from gRPC metadata and
|
||||||
|
// validates it via the Authenticator.
|
||||||
|
func extractAndValidate(ctx context.Context, authenticator *auth.Authenticator) (*auth.TokenInfo, error) {
|
||||||
|
md, ok := metadata.FromIncomingContext(ctx)
|
||||||
|
if !ok {
|
||||||
|
return nil, status.Errorf(codes.Unauthenticated, "missing metadata")
|
||||||
|
}
|
||||||
|
|
||||||
|
vals := md.Get("authorization")
|
||||||
|
if len(vals) == 0 {
|
||||||
|
return nil, status.Errorf(codes.Unauthenticated, "missing authorization header")
|
||||||
|
}
|
||||||
|
|
||||||
|
token := vals[0]
|
||||||
|
const bearerPrefix = "Bearer "
|
||||||
|
if len(token) > len(bearerPrefix) && token[:len(bearerPrefix)] == bearerPrefix {
|
||||||
|
token = token[len(bearerPrefix):]
|
||||||
|
}
|
||||||
|
|
||||||
|
info, err := authenticator.ValidateToken(token)
|
||||||
|
if err != nil {
|
||||||
|
return nil, status.Errorf(codes.Unauthenticated, "invalid token")
|
||||||
|
}
|
||||||
|
|
||||||
|
return info, nil
|
||||||
|
}
|
||||||
48
vendor/git.wntrmute.dev/mc/mcdsl/health/health.go
vendored
Normal file
48
vendor/git.wntrmute.dev/mc/mcdsl/health/health.go
vendored
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
// Package health provides standard health check implementations for
|
||||||
|
// Metacircular services, supporting both REST and gRPC.
|
||||||
|
package health
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"encoding/json"
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/health"
|
||||||
|
healthpb "google.golang.org/grpc/health/grpc_health_v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Handler returns an http.HandlerFunc that checks database connectivity.
// It returns 200 {"status":"ok"} if the database is reachable, or
// 503 {"status":"unhealthy","error":"..."} if the ping fails.
//
// Mount it on whatever path the service uses (typically /healthz or
// /v1/health).
func Handler(database *sql.DB) http.HandlerFunc {
	return func(w http.ResponseWriter, _ *http.Request) {
		w.Header().Set("Content-Type", "application/json")

		// Ping on every request so the check reflects current
		// connectivity rather than a cached status.
		if err := database.Ping(); err != nil {
			w.WriteHeader(http.StatusServiceUnavailable)
			// Encode errors are dropped: the status line is already
			// written, so there is no way to report them to the client.
			_ = json.NewEncoder(w).Encode(map[string]string{
				"status": "unhealthy",
				"error":  err.Error(),
			})
			return
		}

		w.WriteHeader(http.StatusOK)
		_ = json.NewEncoder(w).Encode(map[string]string{
			"status": "ok",
		})
	}
}
|
||||||
|
|
||||||
|
// RegisterGRPC registers the standard gRPC health checking service
// (grpc.health.v1.Health) on the given gRPC server. The health server
// is set to SERVING status immediately.
func RegisterGRPC(srv *grpc.Server) {
	hs := health.NewServer()
	// The empty service name "" is the overall-server status in the
	// gRPC health checking protocol.
	hs.SetServingStatus("", healthpb.HealthCheckResponse_SERVING)
	healthpb.RegisterHealthServer(srv, hs)
}
|
||||||
121
vendor/git.wntrmute.dev/mc/mcdsl/httpserver/server.go
vendored
Normal file
121
vendor/git.wntrmute.dev/mc/mcdsl/httpserver/server.go
vendored
Normal file
@@ -0,0 +1,121 @@
|
|||||||
|
// Package httpserver provides TLS HTTP server setup with chi, standard
|
||||||
|
// middleware, and graceful shutdown for Metacircular services.
|
||||||
|
package httpserver
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/tls"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/go-chi/chi/v5"
|
||||||
|
|
||||||
|
"git.wntrmute.dev/mc/mcdsl/config"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Server wraps a chi router and an http.Server with the standard
// Metacircular TLS configuration.
type Server struct {
	// Router is the chi router. Services register their routes on it.
	Router *chi.Mux

	// Logger is used by the logging middleware.
	Logger *slog.Logger

	// httpSrv is the configured net/http server, created in New.
	httpSrv *http.Server
	// cfg retains the listen address and TLS cert/key paths for
	// ListenAndServeTLS.
	cfg config.ServerConfig
}

// New creates a Server configured from cfg. The underlying http.Server
// is configured with TLS 1.3 minimum and timeouts from the config.
// Services access s.Router to register routes before calling
// ListenAndServeTLS.
func New(cfg config.ServerConfig, logger *slog.Logger) *Server {
	r := chi.NewRouter()

	s := &Server{
		Router: r,
		Logger: logger,
		cfg:    cfg,
	}

	s.httpSrv = &http.Server{
		Addr:    cfg.ListenAddr,
		Handler: r,
		TLSConfig: &tls.Config{
			MinVersion: tls.VersionTLS13,
		},
		// Timeouts come from operator config rather than being
		// hard-coded.
		ReadTimeout:  cfg.ReadTimeout.Duration,
		WriteTimeout: cfg.WriteTimeout.Duration,
		IdleTimeout:  cfg.IdleTimeout.Duration,
	}

	return s
}
|
||||||
|
|
||||||
|
// ListenAndServeTLS starts the HTTPS server using the TLS certificate and
// key from the config. It blocks until the server is shut down. Returns
// nil if the server was shut down gracefully via [Server.Shutdown].
func (s *Server) ListenAndServeTLS() error {
	s.Logger.Info("starting server", "addr", s.cfg.ListenAddr)
	err := s.httpSrv.ListenAndServeTLS(s.cfg.TLSCert, s.cfg.TLSKey)
	// ErrServerClosed is the normal result of a graceful Shutdown, not
	// a failure.
	if err != nil && err != http.ErrServerClosed {
		return fmt.Errorf("httpserver: %w", err)
	}
	return nil
}

// Shutdown gracefully shuts down the server, waiting for in-flight
// requests to complete. The provided context controls the shutdown
// timeout.
func (s *Server) Shutdown(ctx context.Context) error {
	return s.httpSrv.Shutdown(ctx)
}
|
||||||
|
|
||||||
|
// LoggingMiddleware logs each HTTP request after it completes, including
|
||||||
|
// method, path, status code, duration, and remote address.
|
||||||
|
func (s *Server) LoggingMiddleware(next http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
start := time.Now()
|
||||||
|
sw := &StatusWriter{ResponseWriter: w, Status: http.StatusOK}
|
||||||
|
next.ServeHTTP(sw, r)
|
||||||
|
s.Logger.Info("http",
|
||||||
|
"method", r.Method,
|
||||||
|
"path", r.URL.Path,
|
||||||
|
"status", sw.Status,
|
||||||
|
"duration", time.Since(start),
|
||||||
|
"remote", r.RemoteAddr,
|
||||||
|
)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusWriter wraps an http.ResponseWriter to capture the status code.
// It is exported for use in custom middleware.
type StatusWriter struct {
	http.ResponseWriter

	// Status is the HTTP status code written to the response.
	// Initialize it to http.StatusOK, since handlers that never call
	// WriteHeader implicitly respond 200.
	Status int
}

// WriteHeader captures the status code and delegates to the underlying
// ResponseWriter.
func (w *StatusWriter) WriteHeader(code int) {
	w.Status = code
	w.ResponseWriter.WriteHeader(code)
}
|
||||||
|
|
||||||
|
// WriteJSON writes v as JSON with the given HTTP status code.
func WriteJSON(w http.ResponseWriter, status int, v any) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)
	// Encoding errors cannot be reported to the client at this point —
	// the status line has already been written — so they are dropped.
	enc := json.NewEncoder(w)
	_ = enc.Encode(v)
}

// WriteError writes a standard Metacircular error response:
// {"error": "message"}.
func WriteError(w http.ResponseWriter, status int, message string) {
	body := map[string]string{"error": message}
	WriteJSON(w, status, body)
}
|
||||||
304
vendor/git.wntrmute.dev/mc/mcdsl/sso/sso.go
vendored
Normal file
304
vendor/git.wntrmute.dev/mc/mcdsl/sso/sso.go
vendored
Normal file
@@ -0,0 +1,304 @@
|
|||||||
|
// Package sso provides an SSO redirect client for Metacircular web services.
|
||||||
|
//
|
||||||
|
// Services redirect unauthenticated users to MCIAS for login. After
|
||||||
|
// authentication, MCIAS redirects back with an authorization code that
|
||||||
|
// the service exchanges for a JWT token. This package handles the
|
||||||
|
// redirect, state management, and code exchange.
|
||||||
|
//
|
||||||
|
// Security design:
|
||||||
|
// - State cookies use SameSite=Lax (not Strict) because the redirect from
|
||||||
|
// MCIAS back to the service is a cross-site navigation.
|
||||||
|
// - State is a 256-bit random value stored in an HttpOnly cookie.
|
||||||
|
// - Return-to URLs are stored in a separate cookie so MCIAS never sees them.
|
||||||
|
// - The code exchange is a server-to-server HTTPS call (TLS 1.3 minimum).
|
||||||
|
package sso
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// stateBytes is the size of the random SSO state value.
	stateBytes = 32 // 256 bits
	// stateCookieAge bounds how long an in-flight login may take before
	// the state and return-to cookies expire.
	stateCookieAge = 5 * 60 // 5 minutes in seconds
)

// Config holds the SSO client configuration. The values must match the
// SSO client registration in MCIAS config.
type Config struct {
	// MciasURL is the base URL of the MCIAS server.
	MciasURL string

	// ClientID is the registered SSO client identifier.
	ClientID string

	// RedirectURI is the callback URL that MCIAS redirects to after login.
	// Must exactly match the redirect_uri registered in MCIAS config.
	RedirectURI string

	// CACert is an optional path to a PEM-encoded CA certificate for
	// verifying the MCIAS server's TLS certificate.
	CACert string
}

// Client handles the SSO redirect flow with MCIAS.
type Client struct {
	cfg Config
	// httpClient is built in New with a 10-second timeout (and a
	// TLS 1.3-minimum config for non-http:// MCIAS URLs).
	httpClient *http.Client
}
|
||||||
|
|
||||||
|
// New creates an SSO client. TLS 1.3 is required for all HTTPS
// connections to MCIAS.
func New(cfg Config) (*Client, error) {
	// All three identifiers are mandatory; fail fast with a specific
	// message for each.
	if cfg.MciasURL == "" {
		return nil, fmt.Errorf("sso: mcias_url is required")
	}
	if cfg.ClientID == "" {
		return nil, fmt.Errorf("sso: client_id is required")
	}
	if cfg.RedirectURI == "" {
		return nil, fmt.Errorf("sso: redirect_uri is required")
	}

	transport := &http.Transport{}

	// Plain-HTTP MCIAS URLs skip TLS configuration entirely; anything
	// else gets a TLS 1.3-minimum client config.
	if !strings.HasPrefix(cfg.MciasURL, "http://") {
		tlsCfg := &tls.Config{
			MinVersion: tls.VersionTLS13,
		}

		// Optional private CA for verifying the MCIAS certificate.
		if cfg.CACert != "" {
			pem, err := os.ReadFile(cfg.CACert) //nolint:gosec // CA cert path from operator config
			if err != nil {
				return nil, fmt.Errorf("sso: read CA cert %s: %w", cfg.CACert, err)
			}
			pool := x509.NewCertPool()
			if !pool.AppendCertsFromPEM(pem) {
				return nil, fmt.Errorf("sso: no valid certificates in %s", cfg.CACert)
			}
			tlsCfg.RootCAs = pool
		}

		transport.TLSClientConfig = tlsCfg
	}

	return &Client{
		cfg: cfg,
		httpClient: &http.Client{
			Transport: transport,
			// Bound every MCIAS call so a hung server cannot stall
			// logins indefinitely.
			Timeout: 10 * time.Second,
		},
	}, nil
}
|
||||||
|
|
||||||
|
// AuthorizeURL returns the MCIAS authorize URL with the given state parameter.
|
||||||
|
func (c *Client) AuthorizeURL(state string) string {
|
||||||
|
base := strings.TrimRight(c.cfg.MciasURL, "/")
|
||||||
|
return base + "/sso/authorize?" + url.Values{
|
||||||
|
"client_id": {c.cfg.ClientID},
|
||||||
|
"redirect_uri": {c.cfg.RedirectURI},
|
||||||
|
"state": {state},
|
||||||
|
}.Encode()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExchangeCode exchanges an authorization code for a JWT token by calling
|
||||||
|
// MCIAS POST /v1/sso/token.
|
||||||
|
func (c *Client) ExchangeCode(ctx context.Context, code string) (token string, expiresAt time.Time, err error) {
|
||||||
|
reqBody, _ := json.Marshal(map[string]string{
|
||||||
|
"code": code,
|
||||||
|
"client_id": c.cfg.ClientID,
|
||||||
|
"redirect_uri": c.cfg.RedirectURI,
|
||||||
|
})
|
||||||
|
|
||||||
|
base := strings.TrimRight(c.cfg.MciasURL, "/")
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodPost,
|
||||||
|
base+"/v1/sso/token", bytes.NewReader(reqBody))
|
||||||
|
if err != nil {
|
||||||
|
return "", time.Time{}, fmt.Errorf("sso: build exchange request: %w", err)
|
||||||
|
}
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
|
||||||
|
resp, err := c.httpClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return "", time.Time{}, fmt.Errorf("sso: MCIAS exchange: %w", err)
|
||||||
|
}
|
||||||
|
defer func() { _ = resp.Body.Close() }()
|
||||||
|
|
||||||
|
body, err := io.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return "", time.Time{}, fmt.Errorf("sso: read exchange response: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
return "", time.Time{}, fmt.Errorf("sso: exchange failed (HTTP %d): %s", resp.StatusCode, body)
|
||||||
|
}
|
||||||
|
|
||||||
|
var result struct {
|
||||||
|
Token string `json:"token"`
|
||||||
|
ExpiresAt string `json:"expires_at"`
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal(body, &result); err != nil {
|
||||||
|
return "", time.Time{}, fmt.Errorf("sso: decode exchange response: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
exp, parseErr := time.Parse(time.RFC3339, result.ExpiresAt)
|
||||||
|
if parseErr != nil {
|
||||||
|
exp = time.Now().Add(1 * time.Hour)
|
||||||
|
}
|
||||||
|
|
||||||
|
return result.Token, exp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateState returns a cryptographically random hex-encoded state string.
|
||||||
|
func GenerateState() (string, error) {
|
||||||
|
raw := make([]byte, stateBytes)
|
||||||
|
if _, err := rand.Read(raw); err != nil {
|
||||||
|
return "", fmt.Errorf("sso: generate state: %w", err)
|
||||||
|
}
|
||||||
|
return hex.EncodeToString(raw), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// StateCookieName returns the cookie name used for SSO state for a given
// service cookie prefix (e.g., "mcr" → "mcr_sso_state").
func StateCookieName(prefix string) string {
	const suffix = "_sso_state"
	return prefix + suffix
}

// ReturnToCookieName returns the cookie name used for SSO return-to URL
// (e.g., "mcr" → "mcr_sso_return").
func ReturnToCookieName(prefix string) string {
	const suffix = "_sso_return"
	return prefix + suffix
}
|
||||||
|
|
||||||
|
// SetStateCookie stores the SSO state in a short-lived cookie.
|
||||||
|
//
|
||||||
|
// Security: SameSite=Lax is required because the redirect from MCIAS back to
|
||||||
|
// the service is a cross-site top-level navigation. SameSite=Strict cookies
|
||||||
|
// would not be sent on that redirect.
|
||||||
|
func SetStateCookie(w http.ResponseWriter, prefix, state string) {
|
||||||
|
http.SetCookie(w, &http.Cookie{
|
||||||
|
Name: StateCookieName(prefix),
|
||||||
|
Value: state,
|
||||||
|
Path: "/",
|
||||||
|
MaxAge: stateCookieAge,
|
||||||
|
HttpOnly: true,
|
||||||
|
Secure: true,
|
||||||
|
SameSite: http.SameSiteLaxMode,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidateStateCookie compares the state query parameter against the state
// cookie. If they match, the cookie is cleared and nil is returned.
func ValidateStateCookie(w http.ResponseWriter, r *http.Request, prefix, queryState string) error {
	c, err := r.Cookie(StateCookieName(prefix))
	if err != nil || c.Value == "" {
		return fmt.Errorf("sso: missing state cookie")
	}

	// NOTE(review): this is a plain, non-constant-time comparison. The
	// state is single-use and short-lived; consider
	// crypto/subtle.ConstantTimeCompare if timing is a concern.
	if c.Value != queryState {
		return fmt.Errorf("sso: state mismatch")
	}

	// Clear the state cookie (single-use).
	http.SetCookie(w, &http.Cookie{
		Name:     StateCookieName(prefix),
		Value:    "",
		Path:     "/",
		MaxAge:   -1,
		HttpOnly: true,
		Secure:   true,
		SameSite: http.SameSiteLaxMode,
	})

	return nil
}
|
||||||
|
|
||||||
|
// SetReturnToCookie stores the current request path so the service can
// redirect back to it after SSO login completes.
//
// Only r.URL.Path is stored; any query string is dropped.
func SetReturnToCookie(w http.ResponseWriter, r *http.Request, prefix string) {
	// Never send the user back to the login or SSO endpoints themselves;
	// fall back to the root instead.
	path := r.URL.Path
	if path == "" || path == "/login" || strings.HasPrefix(path, "/sso/") {
		path = "/"
	}
	http.SetCookie(w, &http.Cookie{
		Name:     ReturnToCookieName(prefix),
		Value:    path,
		Path:     "/",
		MaxAge:   stateCookieAge, // same 5-minute lifetime as the state cookie
		HttpOnly: true,
		Secure:   true,
		SameSite: http.SameSiteLaxMode,
	})
}
|
||||||
|
|
||||||
|
// ConsumeReturnToCookie reads and clears the return-to cookie, returning
|
||||||
|
// the path. Returns "/" if the cookie is missing or empty.
|
||||||
|
func ConsumeReturnToCookie(w http.ResponseWriter, r *http.Request, prefix string) string {
|
||||||
|
c, err := r.Cookie(ReturnToCookieName(prefix))
|
||||||
|
path := "/"
|
||||||
|
if err == nil && c.Value != "" {
|
||||||
|
path = c.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear the cookie.
|
||||||
|
http.SetCookie(w, &http.Cookie{
|
||||||
|
Name: ReturnToCookieName(prefix),
|
||||||
|
Value: "",
|
||||||
|
Path: "/",
|
||||||
|
MaxAge: -1,
|
||||||
|
HttpOnly: true,
|
||||||
|
Secure: true,
|
||||||
|
SameSite: http.SameSiteLaxMode,
|
||||||
|
})
|
||||||
|
|
||||||
|
return path
|
||||||
|
}
|
||||||
|
|
||||||
|
// RedirectToLogin generates a state, sets the state and return-to cookies,
// and redirects the user to the MCIAS authorize URL.
//
// An error is returned only if state generation fails; in that case no
// redirect has been written.
func RedirectToLogin(w http.ResponseWriter, r *http.Request, client *Client, cookiePrefix string) error {
	state, err := GenerateState()
	if err != nil {
		return err
	}

	// The cookies must be set before http.Redirect writes the response
	// headers.
	SetStateCookie(w, cookiePrefix, state)
	SetReturnToCookie(w, r, cookiePrefix)
	http.Redirect(w, r, client.AuthorizeURL(state), http.StatusFound)
	return nil
}
|
||||||
|
|
||||||
|
// HandleCallback validates the state, exchanges the authorization code for
// a JWT, and returns the token and the return-to path. The caller should
// set the session cookie with the returned token.
func HandleCallback(w http.ResponseWriter, r *http.Request, client *Client, cookiePrefix string) (token, returnTo string, err error) {
	code := r.URL.Query().Get("code")
	state := r.URL.Query().Get("state")
	if code == "" || state == "" {
		return "", "", fmt.Errorf("sso: missing code or state parameter")
	}

	// CSRF check: state must match the value set before the redirect.
	// The state cookie is cleared on success (single-use).
	if err := ValidateStateCookie(w, r, cookiePrefix, state); err != nil {
		return "", "", err
	}

	// Server-to-server exchange of the one-time code for a JWT. The
	// expiry returned by ExchangeCode is discarded here; callers manage
	// session-cookie lifetime themselves.
	token, _, err = client.ExchangeCode(r.Context(), code)
	if err != nil {
		return "", "", err
	}

	returnTo = ConsumeReturnToCookie(w, r, cookiePrefix)
	return token, returnTo, nil
}
|
||||||
36
vendor/git.wntrmute.dev/mc/mcdsl/terminal/terminal.go
vendored
Normal file
36
vendor/git.wntrmute.dev/mc/mcdsl/terminal/terminal.go
vendored
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
// Package terminal provides secure terminal input helpers for CLI tools.
|
||||||
|
package terminal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"golang.org/x/term"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ReadPassword prints the given prompt to stderr and reads a password
|
||||||
|
// from the terminal with echo disabled. It prints a newline after the
|
||||||
|
// input is complete so the cursor advances normally.
|
||||||
|
func ReadPassword(prompt string) (string, error) {
|
||||||
|
b, err := readRaw(prompt)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return string(b), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadPasswordBytes is like ReadPassword but returns a []byte so the
|
||||||
|
// caller can zeroize the buffer after use.
|
||||||
|
func ReadPasswordBytes(prompt string) ([]byte, error) {
|
||||||
|
return readRaw(prompt)
|
||||||
|
}
|
||||||
|
|
||||||
|
func readRaw(prompt string) ([]byte, error) {
|
||||||
|
fmt.Fprint(os.Stderr, prompt)
|
||||||
|
b, err := term.ReadPassword(int(os.Stdin.Fd())) //nolint:gosec // fd fits in int
|
||||||
|
fmt.Fprintln(os.Stderr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b, nil
|
||||||
|
}
|
||||||
104
vendor/git.wntrmute.dev/mc/mcdsl/web/web.go
vendored
Normal file
104
vendor/git.wntrmute.dev/mc/mcdsl/web/web.go
vendored
Normal file
@@ -0,0 +1,104 @@
|
|||||||
|
// Package web provides session cookie management, auth middleware, and
|
||||||
|
// template rendering helpers for Metacircular web UIs built with htmx
|
||||||
|
// and Go html/template.
|
||||||
|
package web
|
||||||
|
|
||||||
|
import (
|
||||||
|
"html/template"
|
||||||
|
"io/fs"
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"git.wntrmute.dev/mc/mcdsl/auth"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SetSessionCookie sets a session cookie with the standard Metacircular
|
||||||
|
// security flags: HttpOnly, Secure, SameSite=Strict.
|
||||||
|
func SetSessionCookie(w http.ResponseWriter, name, token string) {
|
||||||
|
http.SetCookie(w, &http.Cookie{
|
||||||
|
Name: name,
|
||||||
|
Value: token,
|
||||||
|
Path: "/",
|
||||||
|
HttpOnly: true,
|
||||||
|
Secure: true,
|
||||||
|
SameSite: http.SameSiteStrictMode,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearSessionCookie removes a session cookie by setting it to empty
|
||||||
|
// with MaxAge=-1.
|
||||||
|
func ClearSessionCookie(w http.ResponseWriter, name string) {
|
||||||
|
http.SetCookie(w, &http.Cookie{
|
||||||
|
Name: name,
|
||||||
|
Value: "",
|
||||||
|
Path: "/",
|
||||||
|
MaxAge: -1,
|
||||||
|
HttpOnly: true,
|
||||||
|
Secure: true,
|
||||||
|
SameSite: http.SameSiteStrictMode,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSessionToken extracts the session token from the named cookie.
|
||||||
|
// Returns empty string if the cookie is missing or empty.
|
||||||
|
func GetSessionToken(r *http.Request, name string) string {
|
||||||
|
c, err := r.Cookie(name)
|
||||||
|
if err != nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return c.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequireAuth returns middleware that validates the session token via
|
||||||
|
// the Authenticator. If the token is missing or invalid, the user is
|
||||||
|
// redirected to loginPath. On success, the [auth.TokenInfo] is stored
|
||||||
|
// in the request context (retrievable via [auth.TokenInfoFromContext]).
|
||||||
|
func RequireAuth(authenticator *auth.Authenticator, cookieName, loginPath string) func(http.Handler) http.Handler {
|
||||||
|
return func(next http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
token := GetSessionToken(r, cookieName)
|
||||||
|
if token == "" {
|
||||||
|
http.Redirect(w, r, loginPath, http.StatusFound)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
info, err := authenticator.ValidateToken(token)
|
||||||
|
if err != nil {
|
||||||
|
ClearSessionCookie(w, cookieName)
|
||||||
|
http.Redirect(w, r, loginPath, http.StatusFound)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := auth.ContextWithTokenInfo(r.Context(), info)
|
||||||
|
next.ServeHTTP(w, r.WithContext(ctx))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RenderTemplate parses and executes a template from an embedded FS.
|
||||||
|
// It parses "templates/layout.html" and "templates/<name>", merges
|
||||||
|
// any provided FuncMaps, and executes the "layout" template with data.
|
||||||
|
//
|
||||||
|
// This matches the layout + page block pattern used by all Metacircular
|
||||||
|
// web UIs.
|
||||||
|
func RenderTemplate(w http.ResponseWriter, fsys fs.FS, name string, data any, funcs ...template.FuncMap) {
|
||||||
|
merged := template.FuncMap{}
|
||||||
|
for _, fm := range funcs {
|
||||||
|
for k, v := range fm {
|
||||||
|
merged[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tmpl, err := template.New("").Funcs(merged).ParseFS(fsys,
|
||||||
|
"templates/layout.html",
|
||||||
|
"templates/"+name,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
http.Error(w, "template error", http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||||
|
if err := tmpl.ExecuteTemplate(w, "layout", data); err != nil {
|
||||||
|
http.Error(w, "template error", http.StatusInternalServerError)
|
||||||
|
}
|
||||||
|
}
|
||||||
21
vendor/github.com/dustin/go-humanize/.travis.yml
generated
vendored
Normal file
21
vendor/github.com/dustin/go-humanize/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
sudo: false
|
||||||
|
language: go
|
||||||
|
go_import_path: github.com/dustin/go-humanize
|
||||||
|
go:
|
||||||
|
- 1.13.x
|
||||||
|
- 1.14.x
|
||||||
|
- 1.15.x
|
||||||
|
- 1.16.x
|
||||||
|
- stable
|
||||||
|
- master
|
||||||
|
matrix:
|
||||||
|
allow_failures:
|
||||||
|
- go: master
|
||||||
|
fast_finish: true
|
||||||
|
install:
|
||||||
|
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
|
||||||
|
script:
|
||||||
|
- diff -u <(echo -n) <(gofmt -d -s .)
|
||||||
|
- go vet .
|
||||||
|
- go install -v -race ./...
|
||||||
|
- go test -v -race ./...
|
||||||
21
vendor/github.com/dustin/go-humanize/LICENSE
generated
vendored
Normal file
21
vendor/github.com/dustin/go-humanize/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
Copyright (c) 2005-2008 Dustin Sallings <dustin@spy.net>
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
|
||||||
|
<http://www.opensource.org/licenses/mit-license.php>
|
||||||
124
vendor/github.com/dustin/go-humanize/README.markdown
generated
vendored
Normal file
124
vendor/github.com/dustin/go-humanize/README.markdown
generated
vendored
Normal file
@@ -0,0 +1,124 @@
|
|||||||
|
# Humane Units [](https://travis-ci.org/dustin/go-humanize) [](https://godoc.org/github.com/dustin/go-humanize)
|
||||||
|
|
||||||
|
Just a few functions for helping humanize times and sizes.
|
||||||
|
|
||||||
|
`go get` it as `github.com/dustin/go-humanize`, import it as
|
||||||
|
`"github.com/dustin/go-humanize"`, use it as `humanize`.
|
||||||
|
|
||||||
|
See [godoc](https://pkg.go.dev/github.com/dustin/go-humanize) for
|
||||||
|
complete documentation.
|
||||||
|
|
||||||
|
## Sizes
|
||||||
|
|
||||||
|
This lets you take numbers like `82854982` and convert them to useful
|
||||||
|
strings like, `83 MB` or `79 MiB` (whichever you prefer).
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```go
|
||||||
|
fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB.
|
||||||
|
```
|
||||||
|
|
||||||
|
## Times
|
||||||
|
|
||||||
|
This lets you take a `time.Time` and spit it out in relative terms.
|
||||||
|
For example, `12 seconds ago` or `3 days from now`.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```go
|
||||||
|
fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago.
|
||||||
|
```
|
||||||
|
|
||||||
|
Thanks to Kyle Lemons for the time implementation from an IRC
|
||||||
|
conversation one day. It's pretty neat.
|
||||||
|
|
||||||
|
## Ordinals
|
||||||
|
|
||||||
|
From a [mailing list discussion][odisc] where a user wanted to be able
|
||||||
|
to label ordinals.
|
||||||
|
|
||||||
|
0 -> 0th
|
||||||
|
1 -> 1st
|
||||||
|
2 -> 2nd
|
||||||
|
3 -> 3rd
|
||||||
|
4 -> 4th
|
||||||
|
[...]
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```go
|
||||||
|
fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend.
|
||||||
|
```
|
||||||
|
|
||||||
|
## Commas
|
||||||
|
|
||||||
|
Want to shove commas into numbers? Be my guest.
|
||||||
|
|
||||||
|
0 -> 0
|
||||||
|
100 -> 100
|
||||||
|
1000 -> 1,000
|
||||||
|
1000000000 -> 1,000,000,000
|
||||||
|
-100000 -> -100,000
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```go
|
||||||
|
fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491.
|
||||||
|
```
|
||||||
|
|
||||||
|
## Ftoa
|
||||||
|
|
||||||
|
Nicer float64 formatter that removes trailing zeros.
|
||||||
|
|
||||||
|
```go
|
||||||
|
fmt.Printf("%f", 2.24) // 2.240000
|
||||||
|
fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24
|
||||||
|
fmt.Printf("%f", 2.0) // 2.000000
|
||||||
|
fmt.Printf("%s", humanize.Ftoa(2.0)) // 2
|
||||||
|
```
|
||||||
|
|
||||||
|
## SI notation
|
||||||
|
|
||||||
|
Format numbers with [SI notation][sinotation].
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```go
|
||||||
|
humanize.SI(0.00000000223, "M") // 2.23 nM
|
||||||
|
```
|
||||||
|
|
||||||
|
## English-specific functions
|
||||||
|
|
||||||
|
The following functions are in the `humanize/english` subpackage.
|
||||||
|
|
||||||
|
### Plurals
|
||||||
|
|
||||||
|
Simple English pluralization
|
||||||
|
|
||||||
|
```go
|
||||||
|
english.PluralWord(1, "object", "") // object
|
||||||
|
english.PluralWord(42, "object", "") // objects
|
||||||
|
english.PluralWord(2, "bus", "") // buses
|
||||||
|
english.PluralWord(99, "locus", "loci") // loci
|
||||||
|
|
||||||
|
english.Plural(1, "object", "") // 1 object
|
||||||
|
english.Plural(42, "object", "") // 42 objects
|
||||||
|
english.Plural(2, "bus", "") // 2 buses
|
||||||
|
english.Plural(99, "locus", "loci") // 99 loci
|
||||||
|
```
|
||||||
|
|
||||||
|
### Word series
|
||||||
|
|
||||||
|
Format comma-separated words lists with conjuctions:
|
||||||
|
|
||||||
|
```go
|
||||||
|
english.WordSeries([]string{"foo"}, "and") // foo
|
||||||
|
english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar
|
||||||
|
english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz
|
||||||
|
|
||||||
|
english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz
|
||||||
|
```
|
||||||
|
|
||||||
|
[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion
|
||||||
|
[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix
|
||||||
31
vendor/github.com/dustin/go-humanize/big.go
generated
vendored
Normal file
31
vendor/github.com/dustin/go-humanize/big.go
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
package humanize
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/big"
|
||||||
|
)
|
||||||
|
|
||||||
|
// order of magnitude (to a max order)
|
||||||
|
func oomm(n, b *big.Int, maxmag int) (float64, int) {
|
||||||
|
mag := 0
|
||||||
|
m := &big.Int{}
|
||||||
|
for n.Cmp(b) >= 0 {
|
||||||
|
n.DivMod(n, b, m)
|
||||||
|
mag++
|
||||||
|
if mag == maxmag && maxmag >= 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
|
||||||
|
}
|
||||||
|
|
||||||
|
// total order of magnitude
|
||||||
|
// (same as above, but with no upper limit)
|
||||||
|
func oom(n, b *big.Int) (float64, int) {
|
||||||
|
mag := 0
|
||||||
|
m := &big.Int{}
|
||||||
|
for n.Cmp(b) >= 0 {
|
||||||
|
n.DivMod(n, b, m)
|
||||||
|
mag++
|
||||||
|
}
|
||||||
|
return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
|
||||||
|
}
|
||||||
189
vendor/github.com/dustin/go-humanize/bigbytes.go
generated
vendored
Normal file
189
vendor/github.com/dustin/go-humanize/bigbytes.go
generated
vendored
Normal file
@@ -0,0 +1,189 @@
|
|||||||
|
package humanize
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math/big"
|
||||||
|
"strings"
|
||||||
|
"unicode"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
bigIECExp = big.NewInt(1024)
|
||||||
|
|
||||||
|
// BigByte is one byte in bit.Ints
|
||||||
|
BigByte = big.NewInt(1)
|
||||||
|
// BigKiByte is 1,024 bytes in bit.Ints
|
||||||
|
BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
|
||||||
|
// BigMiByte is 1,024 k bytes in bit.Ints
|
||||||
|
BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
|
||||||
|
// BigGiByte is 1,024 m bytes in bit.Ints
|
||||||
|
BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp)
|
||||||
|
// BigTiByte is 1,024 g bytes in bit.Ints
|
||||||
|
BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp)
|
||||||
|
// BigPiByte is 1,024 t bytes in bit.Ints
|
||||||
|
BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp)
|
||||||
|
// BigEiByte is 1,024 p bytes in bit.Ints
|
||||||
|
BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp)
|
||||||
|
// BigZiByte is 1,024 e bytes in bit.Ints
|
||||||
|
BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
|
||||||
|
// BigYiByte is 1,024 z bytes in bit.Ints
|
||||||
|
BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
|
||||||
|
// BigRiByte is 1,024 y bytes in bit.Ints
|
||||||
|
BigRiByte = (&big.Int{}).Mul(BigYiByte, bigIECExp)
|
||||||
|
// BigQiByte is 1,024 r bytes in bit.Ints
|
||||||
|
BigQiByte = (&big.Int{}).Mul(BigRiByte, bigIECExp)
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
bigSIExp = big.NewInt(1000)
|
||||||
|
|
||||||
|
// BigSIByte is one SI byte in big.Ints
|
||||||
|
BigSIByte = big.NewInt(1)
|
||||||
|
// BigKByte is 1,000 SI bytes in big.Ints
|
||||||
|
BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp)
|
||||||
|
// BigMByte is 1,000 SI k bytes in big.Ints
|
||||||
|
BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp)
|
||||||
|
// BigGByte is 1,000 SI m bytes in big.Ints
|
||||||
|
BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp)
|
||||||
|
// BigTByte is 1,000 SI g bytes in big.Ints
|
||||||
|
BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp)
|
||||||
|
// BigPByte is 1,000 SI t bytes in big.Ints
|
||||||
|
BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp)
|
||||||
|
// BigEByte is 1,000 SI p bytes in big.Ints
|
||||||
|
BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp)
|
||||||
|
// BigZByte is 1,000 SI e bytes in big.Ints
|
||||||
|
BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
|
||||||
|
// BigYByte is 1,000 SI z bytes in big.Ints
|
||||||
|
BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
|
||||||
|
// BigRByte is 1,000 SI y bytes in big.Ints
|
||||||
|
BigRByte = (&big.Int{}).Mul(BigYByte, bigSIExp)
|
||||||
|
// BigQByte is 1,000 SI r bytes in big.Ints
|
||||||
|
BigQByte = (&big.Int{}).Mul(BigRByte, bigSIExp)
|
||||||
|
)
|
||||||
|
|
||||||
|
var bigBytesSizeTable = map[string]*big.Int{
|
||||||
|
"b": BigByte,
|
||||||
|
"kib": BigKiByte,
|
||||||
|
"kb": BigKByte,
|
||||||
|
"mib": BigMiByte,
|
||||||
|
"mb": BigMByte,
|
||||||
|
"gib": BigGiByte,
|
||||||
|
"gb": BigGByte,
|
||||||
|
"tib": BigTiByte,
|
||||||
|
"tb": BigTByte,
|
||||||
|
"pib": BigPiByte,
|
||||||
|
"pb": BigPByte,
|
||||||
|
"eib": BigEiByte,
|
||||||
|
"eb": BigEByte,
|
||||||
|
"zib": BigZiByte,
|
||||||
|
"zb": BigZByte,
|
||||||
|
"yib": BigYiByte,
|
||||||
|
"yb": BigYByte,
|
||||||
|
"rib": BigRiByte,
|
||||||
|
"rb": BigRByte,
|
||||||
|
"qib": BigQiByte,
|
||||||
|
"qb": BigQByte,
|
||||||
|
// Without suffix
|
||||||
|
"": BigByte,
|
||||||
|
"ki": BigKiByte,
|
||||||
|
"k": BigKByte,
|
||||||
|
"mi": BigMiByte,
|
||||||
|
"m": BigMByte,
|
||||||
|
"gi": BigGiByte,
|
||||||
|
"g": BigGByte,
|
||||||
|
"ti": BigTiByte,
|
||||||
|
"t": BigTByte,
|
||||||
|
"pi": BigPiByte,
|
||||||
|
"p": BigPByte,
|
||||||
|
"ei": BigEiByte,
|
||||||
|
"e": BigEByte,
|
||||||
|
"z": BigZByte,
|
||||||
|
"zi": BigZiByte,
|
||||||
|
"y": BigYByte,
|
||||||
|
"yi": BigYiByte,
|
||||||
|
"r": BigRByte,
|
||||||
|
"ri": BigRiByte,
|
||||||
|
"q": BigQByte,
|
||||||
|
"qi": BigQiByte,
|
||||||
|
}
|
||||||
|
|
||||||
|
var ten = big.NewInt(10)
|
||||||
|
|
||||||
|
func humanateBigBytes(s, base *big.Int, sizes []string) string {
|
||||||
|
if s.Cmp(ten) < 0 {
|
||||||
|
return fmt.Sprintf("%d B", s)
|
||||||
|
}
|
||||||
|
c := (&big.Int{}).Set(s)
|
||||||
|
val, mag := oomm(c, base, len(sizes)-1)
|
||||||
|
suffix := sizes[mag]
|
||||||
|
f := "%.0f %s"
|
||||||
|
if val < 10 {
|
||||||
|
f = "%.1f %s"
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf(f, val, suffix)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// BigBytes produces a human readable representation of an SI size.
|
||||||
|
//
|
||||||
|
// See also: ParseBigBytes.
|
||||||
|
//
|
||||||
|
// BigBytes(82854982) -> 83 MB
|
||||||
|
func BigBytes(s *big.Int) string {
|
||||||
|
sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", "RB", "QB"}
|
||||||
|
return humanateBigBytes(s, bigSIExp, sizes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// BigIBytes produces a human readable representation of an IEC size.
|
||||||
|
//
|
||||||
|
// See also: ParseBigBytes.
|
||||||
|
//
|
||||||
|
// BigIBytes(82854982) -> 79 MiB
|
||||||
|
func BigIBytes(s *big.Int) string {
|
||||||
|
sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "RiB", "QiB"}
|
||||||
|
return humanateBigBytes(s, bigIECExp, sizes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseBigBytes parses a string representation of bytes into the number
|
||||||
|
// of bytes it represents.
|
||||||
|
//
|
||||||
|
// See also: BigBytes, BigIBytes.
|
||||||
|
//
|
||||||
|
// ParseBigBytes("42 MB") -> 42000000, nil
|
||||||
|
// ParseBigBytes("42 mib") -> 44040192, nil
|
||||||
|
func ParseBigBytes(s string) (*big.Int, error) {
|
||||||
|
lastDigit := 0
|
||||||
|
hasComma := false
|
||||||
|
for _, r := range s {
|
||||||
|
if !(unicode.IsDigit(r) || r == '.' || r == ',') {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if r == ',' {
|
||||||
|
hasComma = true
|
||||||
|
}
|
||||||
|
lastDigit++
|
||||||
|
}
|
||||||
|
|
||||||
|
num := s[:lastDigit]
|
||||||
|
if hasComma {
|
||||||
|
num = strings.Replace(num, ",", "", -1)
|
||||||
|
}
|
||||||
|
|
||||||
|
val := &big.Rat{}
|
||||||
|
_, err := fmt.Sscanf(num, "%f", val)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
|
||||||
|
if m, ok := bigBytesSizeTable[extra]; ok {
|
||||||
|
mv := (&big.Rat{}).SetInt(m)
|
||||||
|
val.Mul(val, mv)
|
||||||
|
rv := &big.Int{}
|
||||||
|
rv.Div(val.Num(), val.Denom())
|
||||||
|
return rv, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, fmt.Errorf("unhandled size name: %v", extra)
|
||||||
|
}
|
||||||
143
vendor/github.com/dustin/go-humanize/bytes.go
generated
vendored
Normal file
143
vendor/github.com/dustin/go-humanize/bytes.go
generated
vendored
Normal file
@@ -0,0 +1,143 @@
|
|||||||
|
package humanize
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"unicode"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IEC Sizes.
|
||||||
|
// kibis of bits
|
||||||
|
const (
|
||||||
|
Byte = 1 << (iota * 10)
|
||||||
|
KiByte
|
||||||
|
MiByte
|
||||||
|
GiByte
|
||||||
|
TiByte
|
||||||
|
PiByte
|
||||||
|
EiByte
|
||||||
|
)
|
||||||
|
|
||||||
|
// SI Sizes.
|
||||||
|
const (
|
||||||
|
IByte = 1
|
||||||
|
KByte = IByte * 1000
|
||||||
|
MByte = KByte * 1000
|
||||||
|
GByte = MByte * 1000
|
||||||
|
TByte = GByte * 1000
|
||||||
|
PByte = TByte * 1000
|
||||||
|
EByte = PByte * 1000
|
||||||
|
)
|
||||||
|
|
||||||
|
var bytesSizeTable = map[string]uint64{
|
||||||
|
"b": Byte,
|
||||||
|
"kib": KiByte,
|
||||||
|
"kb": KByte,
|
||||||
|
"mib": MiByte,
|
||||||
|
"mb": MByte,
|
||||||
|
"gib": GiByte,
|
||||||
|
"gb": GByte,
|
||||||
|
"tib": TiByte,
|
||||||
|
"tb": TByte,
|
||||||
|
"pib": PiByte,
|
||||||
|
"pb": PByte,
|
||||||
|
"eib": EiByte,
|
||||||
|
"eb": EByte,
|
||||||
|
// Without suffix
|
||||||
|
"": Byte,
|
||||||
|
"ki": KiByte,
|
||||||
|
"k": KByte,
|
||||||
|
"mi": MiByte,
|
||||||
|
"m": MByte,
|
||||||
|
"gi": GiByte,
|
||||||
|
"g": GByte,
|
||||||
|
"ti": TiByte,
|
||||||
|
"t": TByte,
|
||||||
|
"pi": PiByte,
|
||||||
|
"p": PByte,
|
||||||
|
"ei": EiByte,
|
||||||
|
"e": EByte,
|
||||||
|
}
|
||||||
|
|
||||||
|
func logn(n, b float64) float64 {
|
||||||
|
return math.Log(n) / math.Log(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
func humanateBytes(s uint64, base float64, sizes []string) string {
|
||||||
|
if s < 10 {
|
||||||
|
return fmt.Sprintf("%d B", s)
|
||||||
|
}
|
||||||
|
e := math.Floor(logn(float64(s), base))
|
||||||
|
suffix := sizes[int(e)]
|
||||||
|
val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10
|
||||||
|
f := "%.0f %s"
|
||||||
|
if val < 10 {
|
||||||
|
f = "%.1f %s"
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf(f, val, suffix)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bytes produces a human readable representation of an SI size.
|
||||||
|
//
|
||||||
|
// See also: ParseBytes.
|
||||||
|
//
|
||||||
|
// Bytes(82854982) -> 83 MB
|
||||||
|
func Bytes(s uint64) string {
|
||||||
|
sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"}
|
||||||
|
return humanateBytes(s, 1000, sizes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IBytes produces a human readable representation of an IEC size.
|
||||||
|
//
|
||||||
|
// See also: ParseBytes.
|
||||||
|
//
|
||||||
|
// IBytes(82854982) -> 79 MiB
|
||||||
|
func IBytes(s uint64) string {
|
||||||
|
sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
|
||||||
|
return humanateBytes(s, 1024, sizes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseBytes parses a string representation of bytes into the number
|
||||||
|
// of bytes it represents.
|
||||||
|
//
|
||||||
|
// See Also: Bytes, IBytes.
|
||||||
|
//
|
||||||
|
// ParseBytes("42 MB") -> 42000000, nil
|
||||||
|
// ParseBytes("42 mib") -> 44040192, nil
|
||||||
|
func ParseBytes(s string) (uint64, error) {
|
||||||
|
lastDigit := 0
|
||||||
|
hasComma := false
|
||||||
|
for _, r := range s {
|
||||||
|
if !(unicode.IsDigit(r) || r == '.' || r == ',') {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if r == ',' {
|
||||||
|
hasComma = true
|
||||||
|
}
|
||||||
|
lastDigit++
|
||||||
|
}
|
||||||
|
|
||||||
|
num := s[:lastDigit]
|
||||||
|
if hasComma {
|
||||||
|
num = strings.Replace(num, ",", "", -1)
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := strconv.ParseFloat(num, 64)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
|
||||||
|
if m, ok := bytesSizeTable[extra]; ok {
|
||||||
|
f *= float64(m)
|
||||||
|
if f >= math.MaxUint64 {
|
||||||
|
return 0, fmt.Errorf("too large: %v", s)
|
||||||
|
}
|
||||||
|
return uint64(f), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0, fmt.Errorf("unhandled size name: %v", extra)
|
||||||
|
}
|
||||||
116
vendor/github.com/dustin/go-humanize/comma.go
generated
vendored
Normal file
116
vendor/github.com/dustin/go-humanize/comma.go
generated
vendored
Normal file
@@ -0,0 +1,116 @@
|
|||||||
|
package humanize
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"math"
|
||||||
|
"math/big"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Comma produces a string form of the given number in base 10 with
|
||||||
|
// commas after every three orders of magnitude.
|
||||||
|
//
|
||||||
|
// e.g. Comma(834142) -> 834,142
|
||||||
|
func Comma(v int64) string {
|
||||||
|
sign := ""
|
||||||
|
|
||||||
|
// Min int64 can't be negated to a usable value, so it has to be special cased.
|
||||||
|
if v == math.MinInt64 {
|
||||||
|
return "-9,223,372,036,854,775,808"
|
||||||
|
}
|
||||||
|
|
||||||
|
if v < 0 {
|
||||||
|
sign = "-"
|
||||||
|
v = 0 - v
|
||||||
|
}
|
||||||
|
|
||||||
|
parts := []string{"", "", "", "", "", "", ""}
|
||||||
|
j := len(parts) - 1
|
||||||
|
|
||||||
|
for v > 999 {
|
||||||
|
parts[j] = strconv.FormatInt(v%1000, 10)
|
||||||
|
switch len(parts[j]) {
|
||||||
|
case 2:
|
||||||
|
parts[j] = "0" + parts[j]
|
||||||
|
case 1:
|
||||||
|
parts[j] = "00" + parts[j]
|
||||||
|
}
|
||||||
|
v = v / 1000
|
||||||
|
j--
|
||||||
|
}
|
||||||
|
parts[j] = strconv.Itoa(int(v))
|
||||||
|
return sign + strings.Join(parts[j:], ",")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Commaf produces a string form of the given number in base 10 with
|
||||||
|
// commas after every three orders of magnitude.
|
||||||
|
//
|
||||||
|
// e.g. Commaf(834142.32) -> 834,142.32
|
||||||
|
func Commaf(v float64) string {
|
||||||
|
buf := &bytes.Buffer{}
|
||||||
|
if v < 0 {
|
||||||
|
buf.Write([]byte{'-'})
|
||||||
|
v = 0 - v
|
||||||
|
}
|
||||||
|
|
||||||
|
comma := []byte{','}
|
||||||
|
|
||||||
|
parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".")
|
||||||
|
pos := 0
|
||||||
|
if len(parts[0])%3 != 0 {
|
||||||
|
pos += len(parts[0]) % 3
|
||||||
|
buf.WriteString(parts[0][:pos])
|
||||||
|
buf.Write(comma)
|
||||||
|
}
|
||||||
|
for ; pos < len(parts[0]); pos += 3 {
|
||||||
|
buf.WriteString(parts[0][pos : pos+3])
|
||||||
|
buf.Write(comma)
|
||||||
|
}
|
||||||
|
buf.Truncate(buf.Len() - 1)
|
||||||
|
|
||||||
|
if len(parts) > 1 {
|
||||||
|
buf.Write([]byte{'.'})
|
||||||
|
buf.WriteString(parts[1])
|
||||||
|
}
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// CommafWithDigits works like the Commaf but limits the resulting
|
||||||
|
// string to the given number of decimal places.
|
||||||
|
//
|
||||||
|
// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3
|
||||||
|
func CommafWithDigits(f float64, decimals int) string {
|
||||||
|
return stripTrailingDigits(Commaf(f), decimals)
|
||||||
|
}
|
||||||
|
|
||||||
|
// BigComma produces a string form of the given big.Int in base 10
|
||||||
|
// with commas after every three orders of magnitude.
|
||||||
|
func BigComma(b *big.Int) string {
|
||||||
|
sign := ""
|
||||||
|
if b.Sign() < 0 {
|
||||||
|
sign = "-"
|
||||||
|
b.Abs(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
athousand := big.NewInt(1000)
|
||||||
|
c := (&big.Int{}).Set(b)
|
||||||
|
_, m := oom(c, athousand)
|
||||||
|
parts := make([]string, m+1)
|
||||||
|
j := len(parts) - 1
|
||||||
|
|
||||||
|
mod := &big.Int{}
|
||||||
|
for b.Cmp(athousand) >= 0 {
|
||||||
|
b.DivMod(b, athousand, mod)
|
||||||
|
parts[j] = strconv.FormatInt(mod.Int64(), 10)
|
||||||
|
switch len(parts[j]) {
|
||||||
|
case 2:
|
||||||
|
parts[j] = "0" + parts[j]
|
||||||
|
case 1:
|
||||||
|
parts[j] = "00" + parts[j]
|
||||||
|
}
|
||||||
|
j--
|
||||||
|
}
|
||||||
|
parts[j] = strconv.Itoa(int(b.Int64()))
|
||||||
|
return sign + strings.Join(parts[j:], ",")
|
||||||
|
}
|
||||||
41
vendor/github.com/dustin/go-humanize/commaf.go
generated
vendored
Normal file
41
vendor/github.com/dustin/go-humanize/commaf.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
//go:build go1.6
|
||||||
|
// +build go1.6
|
||||||
|
|
||||||
|
package humanize
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"math/big"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// BigCommaf produces a string form of the given big.Float in base 10
|
||||||
|
// with commas after every three orders of magnitude.
|
||||||
|
func BigCommaf(v *big.Float) string {
|
||||||
|
buf := &bytes.Buffer{}
|
||||||
|
if v.Sign() < 0 {
|
||||||
|
buf.Write([]byte{'-'})
|
||||||
|
v.Abs(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
comma := []byte{','}
|
||||||
|
|
||||||
|
parts := strings.Split(v.Text('f', -1), ".")
|
||||||
|
pos := 0
|
||||||
|
if len(parts[0])%3 != 0 {
|
||||||
|
pos += len(parts[0]) % 3
|
||||||
|
buf.WriteString(parts[0][:pos])
|
||||||
|
buf.Write(comma)
|
||||||
|
}
|
||||||
|
for ; pos < len(parts[0]); pos += 3 {
|
||||||
|
buf.WriteString(parts[0][pos : pos+3])
|
||||||
|
buf.Write(comma)
|
||||||
|
}
|
||||||
|
buf.Truncate(buf.Len() - 1)
|
||||||
|
|
||||||
|
if len(parts) > 1 {
|
||||||
|
buf.Write([]byte{'.'})
|
||||||
|
buf.WriteString(parts[1])
|
||||||
|
}
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
49
vendor/github.com/dustin/go-humanize/ftoa.go
generated
vendored
Normal file
49
vendor/github.com/dustin/go-humanize/ftoa.go
generated
vendored
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
package humanize
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func stripTrailingZeros(s string) string {
|
||||||
|
if !strings.ContainsRune(s, '.') {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
offset := len(s) - 1
|
||||||
|
for offset > 0 {
|
||||||
|
if s[offset] == '.' {
|
||||||
|
offset--
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if s[offset] != '0' {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
offset--
|
||||||
|
}
|
||||||
|
return s[:offset+1]
|
||||||
|
}
|
||||||
|
|
||||||
|
func stripTrailingDigits(s string, digits int) string {
|
||||||
|
if i := strings.Index(s, "."); i >= 0 {
|
||||||
|
if digits <= 0 {
|
||||||
|
return s[:i]
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
if i+digits >= len(s) {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
return s[:i+digits]
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ftoa converts a float to a string with no trailing zeros.
|
||||||
|
func Ftoa(num float64) string {
|
||||||
|
return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64))
|
||||||
|
}
|
||||||
|
|
||||||
|
// FtoaWithDigits converts a float to a string but limits the resulting string
|
||||||
|
// to the given number of decimal places, and no trailing zeros.
|
||||||
|
func FtoaWithDigits(num float64, digits int) string {
|
||||||
|
return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits))
|
||||||
|
}
|
||||||
8
vendor/github.com/dustin/go-humanize/humanize.go
generated
vendored
Normal file
8
vendor/github.com/dustin/go-humanize/humanize.go
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
/*
|
||||||
|
Package humanize converts boring ugly numbers to human-friendly strings and back.
|
||||||
|
|
||||||
|
Durations can be turned into strings such as "3 days ago", numbers
|
||||||
|
representing sizes like 82854982 into useful strings like, "83 MB" or
|
||||||
|
"79 MiB" (whichever you prefer).
|
||||||
|
*/
|
||||||
|
package humanize
|
||||||
192
vendor/github.com/dustin/go-humanize/number.go
generated
vendored
Normal file
192
vendor/github.com/dustin/go-humanize/number.go
generated
vendored
Normal file
@@ -0,0 +1,192 @@
|
|||||||
|
package humanize
|
||||||
|
|
||||||
|
/*
|
||||||
|
Slightly adapted from the source to fit go-humanize.
|
||||||
|
|
||||||
|
Author: https://github.com/gorhill
|
||||||
|
Source: https://gist.github.com/gorhill/5285193
|
||||||
|
|
||||||
|
*/
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// renderFloatPrecisionMultipliers[p] is 10^p; FormatFloat multiplies
	// the fractional part by this to expose p digits as an integer.
	renderFloatPrecisionMultipliers = [...]float64{
		1,
		10,
		100,
		1000,
		10000,
		100000,
		1000000,
		10000000,
		100000000,
		1000000000,
	}

	// renderFloatPrecisionRounders[p] is half of the smallest displayed
	// unit at precision p; FormatFloat adds it before truncating so the
	// result is rounded rather than floored.
	renderFloatPrecisionRounders = [...]float64{
		0.5,
		0.05,
		0.005,
		0.0005,
		0.00005,
		0.000005,
		0.0000005,
		0.00000005,
		0.000000005,
		0.0000000005,
	}
)
|
||||||
|
|
||||||
|
// FormatFloat produces a formatted number as string based on the following user-specified criteria:
// * thousands separator
// * decimal separator
// * decimal precision
//
// Usage: s := RenderFloat(format, n)
// The format parameter tells how to render the number n.
//
// See examples: http://play.golang.org/p/LXc1Ddm1lJ
//
// Examples of format strings, given n = 12345.6789:
// "#,###.##" => "12,345.67"
// "#,###." => "12,345"
// "#,###" => "12345,678"
// "#\u202F###,##" => "12 345,68"
// "#.###,###### => 12.345,678900
// "" (aka default format) => 12,345.67
//
// The highest precision allowed is 9 digits after the decimal symbol.
// There is also a version for integer number, FormatInteger(),
// which is convenient for calls within template.
func FormatFloat(format string, n float64) string {
	// Special cases:
	// NaN = "NaN"
	// +Inf = "+Infinity"
	// -Inf = "-Infinity"
	// NOTE(review): despite the comment above, the code below returns
	// "Infinity" (no sign) for +Inf — upstream behavior, kept as-is.
	if math.IsNaN(n) {
		return "NaN"
	}
	if n > math.MaxFloat64 {
		return "Infinity"
	}
	if n < (0.0 - math.MaxFloat64) {
		return "-Infinity"
	}

	// default format
	precision := 2
	decimalStr := "."
	thousandStr := ","
	positiveStr := ""
	negativeStr := "-"

	if len(format) > 0 {
		// Work in runes so multi-byte separators (e.g. \u202F) are
		// treated as single directives.
		format := []rune(format)

		// If there is an explicit format directive,
		// then default values are these:
		precision = 9
		thousandStr = ""

		// collect indices of meaningful formatting directives
		// (anything that is not a digit placeholder '#' or '0')
		formatIndx := []int{}
		for i, char := range format {
			if char != '#' && char != '0' {
				formatIndx = append(formatIndx, i)
			}
		}

		if len(formatIndx) > 0 {
			// Directive at index 0:
			// Must be a '+'
			// Raise an error if not the case
			// index: 0123456789
			//        +0.000,000
			//        +000,000.0
			//        +0000.00
			//        +0000
			if formatIndx[0] == 0 {
				if format[formatIndx[0]] != '+' {
					panic("RenderFloat(): invalid positive sign directive")
				}
				positiveStr = "+"
				formatIndx = formatIndx[1:]
			}

			// Two directives:
			// First is thousands separator
			// Raise an error if not followed by 3-digit
			// 0123456789
			// 0.000,000
			// 000,000.00
			if len(formatIndx) == 2 {
				if (formatIndx[1] - formatIndx[0]) != 4 {
					panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers")
				}
				thousandStr = string(format[formatIndx[0]])
				formatIndx = formatIndx[1:]
			}

			// One directive:
			// Directive is decimal separator
			// The number of digit-specifier following the separator indicates wanted precision
			// 0123456789
			// 0.00
			// 000,0000
			if len(formatIndx) == 1 {
				decimalStr = string(format[formatIndx[0]])
				precision = len(format) - formatIndx[0] - 1
			}
		}
	}

	// generate sign part; magnitudes below 1e-9 are treated as zero
	// (no sign) since they cannot be displayed at max precision anyway.
	var signStr string
	if n >= 0.000000001 {
		signStr = positiveStr
	} else if n <= -0.000000001 {
		signStr = negativeStr
		n = -n
	} else {
		signStr = ""
		n = 0.0
	}

	// split number into integer and fractional parts, rounding by
	// adding half of the last displayed digit before truncation
	intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision])

	// generate integer part string
	intStr := strconv.FormatInt(int64(intf), 10)

	// add thousand separator if required, inserting right-to-left
	// every three digits
	if len(thousandStr) > 0 {
		for i := len(intStr); i > 3; {
			i -= 3
			intStr = intStr[:i] + thousandStr + intStr[i:]
		}
	}

	// no fractional part, we can leave now
	if precision == 0 {
		return signStr + intStr
	}

	// generate fractional part by scaling it up to an integer
	fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision]))
	// may need zero-padding on the left to reach the full precision
	if len(fracStr) < precision {
		fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr
	}

	return signStr + intStr + decimalStr + fracStr
}
|
||||||
|
|
||||||
|
// FormatInteger produces a formatted number as string.
|
||||||
|
// See FormatFloat.
|
||||||
|
func FormatInteger(format string, n int) string {
|
||||||
|
return FormatFloat(format, float64(n))
|
||||||
|
}
|
||||||
25
vendor/github.com/dustin/go-humanize/ordinals.go
generated
vendored
Normal file
25
vendor/github.com/dustin/go-humanize/ordinals.go
generated
vendored
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
package humanize
|
||||||
|
|
||||||
|
import "strconv"
|
||||||
|
|
||||||
|
// Ordinal gives you the input number in a rank/ordinal format.
//
// Ordinal(3) -> 3rd
func Ordinal(x int) string {
	suffix := "th"
	// The teens (11th, 12th, 13th) always take "th"; everything else
	// is decided by the final digit.
	if last2 := x % 100; last2 < 11 || last2 > 13 {
		switch x % 10 {
		case 1:
			suffix = "st"
		case 2:
			suffix = "nd"
		case 3:
			suffix = "rd"
		}
	}
	return strconv.Itoa(x) + suffix
}
|
||||||
127
vendor/github.com/dustin/go-humanize/si.go
generated
vendored
Normal file
127
vendor/github.com/dustin/go-humanize/si.go
generated
vendored
Normal file
@@ -0,0 +1,127 @@
|
|||||||
|
package humanize
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"math"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// siPrefixTable maps a power-of-ten exponent (always a multiple of 3)
// to its SI prefix symbol.
var siPrefixTable = map[float64]string{
	-30: "q", // quecto
	-27: "r", // ronto
	-24: "y", // yocto
	-21: "z", // zepto
	-18: "a", // atto
	-15: "f", // femto
	-12: "p", // pico
	-9:  "n", // nano
	-6:  "µ", // micro
	-3:  "m", // milli
	0:   "",
	3:   "k", // kilo
	6:   "M", // mega
	9:   "G", // giga
	12:  "T", // tera
	15:  "P", // peta
	18:  "E", // exa
	21:  "Z", // zetta
	24:  "Y", // yotta
	27:  "R", // ronna
	30:  "Q", // quetta
}
|
||||||
|
|
||||||
|
// revSIPrefixTable maps an SI prefix symbol to its multiplier
// (e.g. "k" -> 1e3), derived from siPrefixTable at init time.
var revSIPrefixTable = revfmap(siPrefixTable)
|
||||||
|
|
||||||
|
// revfmap reverses the map and precomputes the power multiplier
// for each prefix, so lookups return 10^exponent directly.
func revfmap(in map[float64]string) map[string]float64 {
	out := make(map[string]float64, len(in))
	for exponent, prefix := range in {
		out[prefix] = math.Pow(10, exponent)
	}
	return out
}
|
||||||
|
|
||||||
|
// riParseRegex matches an SI-formatted value: a number, an optional
// single SI prefix rune, and a trailing unit (e.g. "2.2345 pF").
var riParseRegex *regexp.Regexp

func init() {
	// Build a character class containing every known SI prefix rune.
	// Map iteration order is random, but order inside a character
	// class is irrelevant.
	ri := `^([\-0-9.]+)\s?([`
	for _, v := range siPrefixTable {
		ri += v
	}
	ri += `]?)(.*)`

	riParseRegex = regexp.MustCompile(ri)
}
|
||||||
|
|
||||||
|
// ComputeSI finds the most appropriate SI prefix for the given number
// and returns the prefix along with the value adjusted to be within
// that prefix.
//
// See also: SI, ParseSI.
//
// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p")
func ComputeSI(input float64) (float64, string) {
	if input == 0 {
		return 0, ""
	}
	// Work with the magnitude; the sign is restored at the end.
	mag := math.Abs(input)
	// Round the base-10 exponent down to the nearest multiple of 3,
	// since SI prefixes step in factors of a thousand.
	exponent := math.Floor(logn(mag, 10))
	exponent = math.Floor(exponent/3) * 3

	value := mag / math.Pow(10, exponent)

	// Handle special case where value is exactly 1000.0
	// Should return 1 M instead of 1000 k
	// (floating-point rounding in the log/floor above can land here)
	if value == 1000.0 {
		exponent += 3
		value = mag / math.Pow(10, exponent)
	}

	// Restore the sign of the original input.
	value = math.Copysign(value, input)

	prefix := siPrefixTable[exponent]
	return value, prefix
}
|
||||||
|
|
||||||
|
// SI returns a string with default formatting.
|
||||||
|
//
|
||||||
|
// SI uses Ftoa to format float value, removing trailing zeros.
|
||||||
|
//
|
||||||
|
// See also: ComputeSI, ParseSI.
|
||||||
|
//
|
||||||
|
// e.g. SI(1000000, "B") -> 1 MB
|
||||||
|
// e.g. SI(2.2345e-12, "F") -> 2.2345 pF
|
||||||
|
func SI(input float64, unit string) string {
|
||||||
|
value, prefix := ComputeSI(input)
|
||||||
|
return Ftoa(value) + " " + prefix + unit
|
||||||
|
}
|
||||||
|
|
||||||
|
// SIWithDigits works like SI but limits the resulting string to the
|
||||||
|
// given number of decimal places.
|
||||||
|
//
|
||||||
|
// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB
|
||||||
|
// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF
|
||||||
|
func SIWithDigits(input float64, decimals int, unit string) string {
|
||||||
|
value, prefix := ComputeSI(input)
|
||||||
|
return FtoaWithDigits(value, decimals) + " " + prefix + unit
|
||||||
|
}
|
||||||
|
|
||||||
|
// errInvalid is returned by ParseSI when the input does not look like
// a number with an optional SI prefix.
var errInvalid = errors.New("invalid input")
|
||||||
|
|
||||||
|
// ParseSI parses an SI string back into the number and unit.
|
||||||
|
//
|
||||||
|
// See also: SI, ComputeSI.
|
||||||
|
//
|
||||||
|
// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil)
|
||||||
|
func ParseSI(input string) (float64, string, error) {
|
||||||
|
found := riParseRegex.FindStringSubmatch(input)
|
||||||
|
if len(found) != 4 {
|
||||||
|
return 0, "", errInvalid
|
||||||
|
}
|
||||||
|
mag := revSIPrefixTable[found[2]]
|
||||||
|
unit := found[3]
|
||||||
|
|
||||||
|
base, err := strconv.ParseFloat(found[1], 64)
|
||||||
|
return base * mag, unit, err
|
||||||
|
}
|
||||||
117
vendor/github.com/dustin/go-humanize/times.go
generated
vendored
Normal file
117
vendor/github.com/dustin/go-humanize/times.go
generated
vendored
Normal file
@@ -0,0 +1,117 @@
|
|||||||
|
package humanize
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"sort"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Seconds-based time units
const (
	Day      = 24 * time.Hour
	Week     = 7 * Day
	Month    = 30 * Day  // approximation: a month is treated as 30 days
	Year     = 12 * Month // approximation: a year is treated as 360 days
	LongTime = 37 * Year
)
|
||||||
|
|
||||||
|
// Time formats a time into a relative string.
//
// Time(someT) -> "3 weeks ago"
func Time(then time.Time) string {
	// Delegate to RelTime with the current wall-clock time and the
	// default past/future labels.
	return RelTime(then, time.Now(), "ago", "from now")
}
|
||||||
|
|
||||||
|
// A RelTimeMagnitude struct contains a relative time point at which
// the relative format of time will switch to a new format string. A
// slice of these in ascending order by their "D" field is passed to
// CustomRelTime to format durations.
//
// The Format field is a string that may contain a "%s" which will be
// replaced with the appropriate signed label (e.g. "ago" or "from
// now") and a "%d" that will be replaced by the quantity.
//
// The DivBy field is the amount of time the time difference must be
// divided by in order to display correctly.
//
// e.g. if D is 2*time.Minute and you want to display "%d minutes %s"
// DivBy should be time.Minute so whatever the duration is will be
// expressed in minutes.
type RelTimeMagnitude struct {
	D      time.Duration // exclusive upper bound for which this entry applies
	Format string        // fmt-style format; may contain "%d" (quantity) and "%s" (label)
	DivBy  time.Duration // divisor converting the duration into the displayed quantity
}
|
||||||
|
|
||||||
|
// defaultMagnitudes is the breakpoint table used by RelTime and Time,
// ordered by ascending D as CustomRelTime requires. Entries with
// DivBy == 1 have no "%d" verb, so the divisor is unused.
var defaultMagnitudes = []RelTimeMagnitude{
	{time.Second, "now", time.Second},
	{2 * time.Second, "1 second %s", 1},
	{time.Minute, "%d seconds %s", time.Second},
	{2 * time.Minute, "1 minute %s", 1},
	{time.Hour, "%d minutes %s", time.Minute},
	{2 * time.Hour, "1 hour %s", 1},
	{Day, "%d hours %s", time.Hour},
	{2 * Day, "1 day %s", 1},
	{Week, "%d days %s", Day},
	{2 * Week, "1 week %s", 1},
	{Month, "%d weeks %s", Week},
	{2 * Month, "1 month %s", 1},
	{Year, "%d months %s", Month},
	{18 * Month, "1 year %s", 1},
	{2 * Year, "2 years %s", 1},
	{LongTime, "%d years %s", Year},
	{math.MaxInt64, "a long while %s", 1},
}
|
||||||
|
|
||||||
|
// RelTime formats a time into a relative string.
//
// It takes two times and two labels. In addition to the generic time
// delta string (e.g. 5 minutes), the labels are applied so that the
// label corresponding to the smaller time is used.
//
// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier"
func RelTime(a, b time.Time, albl, blbl string) string {
	// Delegate to CustomRelTime with the package's standard table.
	return CustomRelTime(a, b, albl, blbl, defaultMagnitudes)
}
|
||||||
|
|
||||||
|
// CustomRelTime formats a time into a relative string.
|
||||||
|
//
|
||||||
|
// It takes two times two labels and a table of relative time formats.
|
||||||
|
// In addition to the generic time delta string (e.g. 5 minutes), the
|
||||||
|
// labels are used applied so that the label corresponding to the
|
||||||
|
// smaller time is applied.
|
||||||
|
func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string {
|
||||||
|
lbl := albl
|
||||||
|
diff := b.Sub(a)
|
||||||
|
|
||||||
|
if a.After(b) {
|
||||||
|
lbl = blbl
|
||||||
|
diff = a.Sub(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
n := sort.Search(len(magnitudes), func(i int) bool {
|
||||||
|
return magnitudes[i].D > diff
|
||||||
|
})
|
||||||
|
|
||||||
|
if n >= len(magnitudes) {
|
||||||
|
n = len(magnitudes) - 1
|
||||||
|
}
|
||||||
|
mag := magnitudes[n]
|
||||||
|
args := []interface{}{}
|
||||||
|
escaped := false
|
||||||
|
for _, ch := range mag.Format {
|
||||||
|
if escaped {
|
||||||
|
switch ch {
|
||||||
|
case 's':
|
||||||
|
args = append(args, lbl)
|
||||||
|
case 'd':
|
||||||
|
args = append(args, diff/mag.DivBy)
|
||||||
|
}
|
||||||
|
escaped = false
|
||||||
|
} else {
|
||||||
|
escaped = ch == '%'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fmt.Sprintf(mag.Format, args...)
|
||||||
|
}
|
||||||
14
vendor/github.com/fsnotify/fsnotify/.cirrus.yml
generated
vendored
Normal file
14
vendor/github.com/fsnotify/fsnotify/.cirrus.yml
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
freebsd_task:
|
||||||
|
name: 'FreeBSD'
|
||||||
|
freebsd_instance:
|
||||||
|
image_family: freebsd-14-2
|
||||||
|
install_script:
|
||||||
|
- pkg update -f
|
||||||
|
- pkg install -y go
|
||||||
|
test_script:
|
||||||
|
# run tests as user "cirrus" instead of root
|
||||||
|
- pw useradd cirrus -m
|
||||||
|
- chown -R cirrus:cirrus .
|
||||||
|
- FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
|
||||||
|
- sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
|
||||||
|
- FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./...
|
||||||
10
vendor/github.com/fsnotify/fsnotify/.gitignore
generated
vendored
Normal file
10
vendor/github.com/fsnotify/fsnotify/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
# go test -c output
|
||||||
|
*.test
|
||||||
|
*.test.exe
|
||||||
|
|
||||||
|
# Output of go build ./cmd/fsnotify
|
||||||
|
/fsnotify
|
||||||
|
/fsnotify.exe
|
||||||
|
|
||||||
|
/test/kqueue
|
||||||
|
/test/a.out
|
||||||
2
vendor/github.com/fsnotify/fsnotify/.mailmap
generated
vendored
Normal file
2
vendor/github.com/fsnotify/fsnotify/.mailmap
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
Chris Howey <howeyc@gmail.com> <chris@howey.me>
|
||||||
|
Nathan Youngman <git@nathany.com> <4566+nathany@users.noreply.github.com>
|
||||||
602
vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
generated
vendored
Normal file
602
vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
generated
vendored
Normal file
@@ -0,0 +1,602 @@
|
|||||||
|
# Changelog
|
||||||
|
|
||||||
|
1.9.0 2025-04-04
|
||||||
|
----------------
|
||||||
|
|
||||||
|
### Changes and fixes
|
||||||
|
|
||||||
|
- all: make BufferedWatcher buffered again ([#657])
|
||||||
|
|
||||||
|
- inotify: fix race when adding/removing watches while a watched path is being
|
||||||
|
deleted ([#678], [#686])
|
||||||
|
|
||||||
|
- inotify: don't send empty event if a watched path is unmounted ([#655])
|
||||||
|
|
||||||
|
- inotify: don't register duplicate watches when watching both a symlink and its
|
||||||
|
target; previously that would get "half-added" and removing the second would
|
||||||
|
panic ([#679])
|
||||||
|
|
||||||
|
- kqueue: fix watching relative symlinks ([#681])
|
||||||
|
|
||||||
|
- kqueue: correctly mark pre-existing entries when watching a link to a dir on
|
||||||
|
kqueue ([#682])
|
||||||
|
|
||||||
|
- illumos: don't send error if changed file is deleted while processing the
|
||||||
|
event ([#678])
|
||||||
|
|
||||||
|
|
||||||
|
[#657]: https://github.com/fsnotify/fsnotify/pull/657
|
||||||
|
[#678]: https://github.com/fsnotify/fsnotify/pull/678
|
||||||
|
[#686]: https://github.com/fsnotify/fsnotify/pull/686
|
||||||
|
[#655]: https://github.com/fsnotify/fsnotify/pull/655
|
||||||
|
[#681]: https://github.com/fsnotify/fsnotify/pull/681
|
||||||
|
[#679]: https://github.com/fsnotify/fsnotify/pull/679
|
||||||
|
[#682]: https://github.com/fsnotify/fsnotify/pull/682
|
||||||
|
|
||||||
|
1.8.0 2024-10-31
|
||||||
|
----------------
|
||||||
|
|
||||||
|
### Additions
|
||||||
|
|
||||||
|
- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619])
|
||||||
|
|
||||||
|
### Changes and fixes
|
||||||
|
|
||||||
|
- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610])
|
||||||
|
|
||||||
|
- kqueue: ignore events with Ident=0 ([#590])
|
||||||
|
|
||||||
|
- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617])
|
||||||
|
|
||||||
|
- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625])
|
||||||
|
|
||||||
|
- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620])
|
||||||
|
|
||||||
|
- inotify: fix panic when calling Remove() in a goroutine ([#650])
|
||||||
|
|
||||||
|
- fen: allow watching subdirectories of watched directories ([#621])
|
||||||
|
|
||||||
|
[#590]: https://github.com/fsnotify/fsnotify/pull/590
|
||||||
|
[#610]: https://github.com/fsnotify/fsnotify/pull/610
|
||||||
|
[#617]: https://github.com/fsnotify/fsnotify/pull/617
|
||||||
|
[#619]: https://github.com/fsnotify/fsnotify/pull/619
|
||||||
|
[#620]: https://github.com/fsnotify/fsnotify/pull/620
|
||||||
|
[#621]: https://github.com/fsnotify/fsnotify/pull/621
|
||||||
|
[#625]: https://github.com/fsnotify/fsnotify/pull/625
|
||||||
|
[#650]: https://github.com/fsnotify/fsnotify/pull/650
|
||||||
|
|
||||||
|
1.7.0 - 2023-10-22
|
||||||
|
------------------
|
||||||
|
This version of fsnotify needs Go 1.17.
|
||||||
|
|
||||||
|
### Additions
|
||||||
|
|
||||||
|
- illumos: add FEN backend to support illumos and Solaris. ([#371])
|
||||||
|
|
||||||
|
- all: add `NewBufferedWatcher()` to use a buffered channel, which can be useful
|
||||||
|
in cases where you can't control the kernel buffer and receive a large number
|
||||||
|
of events in bursts. ([#550], [#572])
|
||||||
|
|
||||||
|
- all: add `AddWith()`, which is identical to `Add()` but allows passing
|
||||||
|
options. ([#521])
|
||||||
|
|
||||||
|
- windows: allow setting the ReadDirectoryChangesW() buffer size with
|
||||||
|
`fsnotify.WithBufferSize()`; the default of 64K is the highest value that
|
||||||
|
works on all platforms and is enough for most purposes, but in some cases a
|
||||||
|
highest buffer is needed. ([#521])
|
||||||
|
|
||||||
|
### Changes and fixes
|
||||||
|
|
||||||
|
- inotify: remove watcher if a watched path is renamed ([#518])
|
||||||
|
|
||||||
|
After a rename the reported name wasn't updated, or even an empty string.
|
||||||
|
Inotify doesn't provide any good facilities to update it, so just remove the
|
||||||
|
watcher. This is already how it worked on kqueue and FEN.
|
||||||
|
|
||||||
|
On Windows this does work, and remains working.
|
||||||
|
|
||||||
|
- windows: don't listen for file attribute changes ([#520])
|
||||||
|
|
||||||
|
File attribute changes are sent as `FILE_ACTION_MODIFIED` by the Windows API,
|
||||||
|
with no way to see if they're a file write or attribute change, so would show
|
||||||
|
up as a fsnotify.Write event. This is never useful, and could result in many
|
||||||
|
spurious Write events.
|
||||||
|
|
||||||
|
- windows: return `ErrEventOverflow` if the buffer is full ([#525])
|
||||||
|
|
||||||
|
Before it would merely return "short read", making it hard to detect this
|
||||||
|
error.
|
||||||
|
|
||||||
|
- kqueue: make sure events for all files are delivered properly when removing a
|
||||||
|
watched directory ([#526])
|
||||||
|
|
||||||
|
Previously they would get sent with `""` (empty string) or `"."` as the path
|
||||||
|
name.
|
||||||
|
|
||||||
|
- kqueue: don't emit spurious Create events for symbolic links ([#524])
|
||||||
|
|
||||||
|
The link would get resolved but kqueue would "forget" it already saw the link
|
||||||
|
itself, resulting on a Create for every Write event for the directory.
|
||||||
|
|
||||||
|
- all: return `ErrClosed` on `Add()` when the watcher is closed ([#516])
|
||||||
|
|
||||||
|
- other: add `Watcher.Errors` and `Watcher.Events` to the no-op `Watcher` in
|
||||||
|
`backend_other.go`, making it easier to use on unsupported platforms such as
|
||||||
|
WASM, AIX, etc. ([#528])
|
||||||
|
|
||||||
|
- other: use the `backend_other.go` no-op if the `appengine` build tag is set;
|
||||||
|
Google AppEngine forbids usage of the unsafe package so the inotify backend
|
||||||
|
won't compile there.
|
||||||
|
|
||||||
|
[#371]: https://github.com/fsnotify/fsnotify/pull/371
|
||||||
|
[#516]: https://github.com/fsnotify/fsnotify/pull/516
|
||||||
|
[#518]: https://github.com/fsnotify/fsnotify/pull/518
|
||||||
|
[#520]: https://github.com/fsnotify/fsnotify/pull/520
|
||||||
|
[#521]: https://github.com/fsnotify/fsnotify/pull/521
|
||||||
|
[#524]: https://github.com/fsnotify/fsnotify/pull/524
|
||||||
|
[#525]: https://github.com/fsnotify/fsnotify/pull/525
|
||||||
|
[#526]: https://github.com/fsnotify/fsnotify/pull/526
|
||||||
|
[#528]: https://github.com/fsnotify/fsnotify/pull/528
|
||||||
|
[#537]: https://github.com/fsnotify/fsnotify/pull/537
|
||||||
|
[#550]: https://github.com/fsnotify/fsnotify/pull/550
|
||||||
|
[#572]: https://github.com/fsnotify/fsnotify/pull/572
|
||||||
|
|
||||||
|
1.6.0 - 2022-10-13
|
||||||
|
------------------
|
||||||
|
This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1,
|
||||||
|
but not documented). It also increases the minimum Linux version to 2.6.32.
|
||||||
|
|
||||||
|
### Additions
|
||||||
|
|
||||||
|
- all: add `Event.Has()` and `Op.Has()` ([#477])
|
||||||
|
|
||||||
|
This makes checking events a lot easier; for example:
|
||||||
|
|
||||||
|
if event.Op&Write == Write && !(event.Op&Remove == Remove) {
|
||||||
|
}
|
||||||
|
|
||||||
|
Becomes:
|
||||||
|
|
||||||
|
if event.Has(Write) && !event.Has(Remove) {
|
||||||
|
}
|
||||||
|
|
||||||
|
- all: add cmd/fsnotify ([#463])
|
||||||
|
|
||||||
|
A command-line utility for testing and some examples.
|
||||||
|
|
||||||
|
### Changes and fixes
|
||||||
|
|
||||||
|
- inotify: don't ignore events for files that don't exist ([#260], [#470])
|
||||||
|
|
||||||
|
Previously the inotify watcher would call `os.Lstat()` to check if a file
|
||||||
|
still exists before emitting events.
|
||||||
|
|
||||||
|
This was inconsistent with other platforms and resulted in inconsistent event
|
||||||
|
reporting (e.g. when a file is quickly removed and re-created), and generally
|
||||||
|
a source of confusion. It was added in 2013 to fix a memory leak that no
|
||||||
|
longer exists.
|
||||||
|
|
||||||
|
- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's
|
||||||
|
not watched ([#460])
|
||||||
|
|
||||||
|
- inotify: replace epoll() with non-blocking inotify ([#434])
|
||||||
|
|
||||||
|
Non-blocking inotify was not generally available at the time this library was
|
||||||
|
written in 2014, but now it is. As a result, the minimum Linux version is
|
||||||
|
bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster.
|
||||||
|
|
||||||
|
- kqueue: don't check for events every 100ms ([#480])
|
||||||
|
|
||||||
|
The watcher would wake up every 100ms, even when there was nothing to do. Now
|
||||||
|
it waits until there is something to do.
|
||||||
|
|
||||||
|
- macos: retry opening files on EINTR ([#475])
|
||||||
|
|
||||||
|
- kqueue: skip unreadable files ([#479])
|
||||||
|
|
||||||
|
kqueue requires a file descriptor for every file in a directory; this would
|
||||||
|
fail if a file was unreadable by the current user. Now these files are simply
|
||||||
|
skipped.
|
||||||
|
|
||||||
|
- windows: fix renaming a watched directory if the parent is also watched ([#370])
|
||||||
|
|
||||||
|
- windows: increase buffer size from 4K to 64K ([#485])
|
||||||
|
|
||||||
|
- windows: close file handle on Remove() ([#288])
|
||||||
|
|
||||||
|
- kqueue: put pathname in the error if watching a file fails ([#471])
|
||||||
|
|
||||||
|
- inotify, windows: calling Close() more than once could race ([#465])
|
||||||
|
|
||||||
|
- kqueue: improve Close() performance ([#233])
|
||||||
|
|
||||||
|
- all: various documentation additions and clarifications.
|
||||||
|
|
||||||
|
[#233]: https://github.com/fsnotify/fsnotify/pull/233
|
||||||
|
[#260]: https://github.com/fsnotify/fsnotify/pull/260
|
||||||
|
[#288]: https://github.com/fsnotify/fsnotify/pull/288
|
||||||
|
[#370]: https://github.com/fsnotify/fsnotify/pull/370
|
||||||
|
[#434]: https://github.com/fsnotify/fsnotify/pull/434
|
||||||
|
[#460]: https://github.com/fsnotify/fsnotify/pull/460
|
||||||
|
[#463]: https://github.com/fsnotify/fsnotify/pull/463
|
||||||
|
[#465]: https://github.com/fsnotify/fsnotify/pull/465
|
||||||
|
[#470]: https://github.com/fsnotify/fsnotify/pull/470
|
||||||
|
[#471]: https://github.com/fsnotify/fsnotify/pull/471
|
||||||
|
[#475]: https://github.com/fsnotify/fsnotify/pull/475
|
||||||
|
[#477]: https://github.com/fsnotify/fsnotify/pull/477
|
||||||
|
[#479]: https://github.com/fsnotify/fsnotify/pull/479
|
||||||
|
[#480]: https://github.com/fsnotify/fsnotify/pull/480
|
||||||
|
[#485]: https://github.com/fsnotify/fsnotify/pull/485
|
||||||
|
|
||||||
|
## [1.5.4] - 2022-04-25
|
||||||
|
|
||||||
|
* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447)
|
||||||
|
* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444)
|
||||||
|
* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443)
|
||||||
|
|
||||||
|
## [1.5.3] - 2022-04-22
|
||||||
|
|
||||||
|
* This version is retracted. An incorrect branch is published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445)
|
||||||
|
|
||||||
|
## [1.5.2] - 2022-04-21
|
||||||
|
|
||||||
|
* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374)
|
||||||
|
* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361)
|
||||||
|
* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424)
|
||||||
|
* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406)
|
||||||
|
* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416)
|
||||||
|
|
||||||
|
## [1.5.1] - 2021-08-24
|
||||||
|
|
||||||
|
* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394)
|
||||||
|
|
||||||
|
## [1.5.0] - 2021-08-20
|
||||||
|
|
||||||
|
* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381)
|
||||||
|
* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#298](https://github.com/fsnotify/fsnotify/pull/298)
|
||||||
|
* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289)
|
||||||
|
* CI: Use GitHub Actions for CI and cover go 1.12-1.17
|
||||||
|
[#378](https://github.com/fsnotify/fsnotify/pull/378)
|
||||||
|
[#381](https://github.com/fsnotify/fsnotify/pull/381)
|
||||||
|
[#385](https://github.com/fsnotify/fsnotify/pull/385)
|
||||||
|
* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325)
|
||||||
|
|
||||||
|
## [1.4.9] - 2020-03-11
|
||||||
|
|
||||||
|
* Move example usage to the readme #329. This may resolve #328.
|
||||||
|
|
||||||
|
## [1.4.8] - 2020-03-10
|
||||||
|
|
||||||
|
* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216)
|
||||||
|
* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265)
|
||||||
|
* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266)
|
||||||
|
* CI: Less verbosity (@nathany #267)
|
||||||
|
* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267)
|
||||||
|
* Tests: Check if channels are closed in the example (@alexeykazakov #244)
|
||||||
|
* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284)
|
||||||
|
* CI: Add windows to travis matrix (@cpuguy83 #284)
|
||||||
|
* Docs: Remover appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93)
|
||||||
|
* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219)
|
||||||
|
* Linux: open files with close-on-exec (@linxiulei #273)
|
||||||
|
* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 )
|
||||||
|
* Project: Add go.mod (@nathany #309)
|
||||||
|
* Project: Revise editor config (@nathany #309)
|
||||||
|
* Project: Update copyright for 2019 (@nathany #309)
|
||||||
|
* CI: Drop go1.8 from CI matrix (@nathany #309)
|
||||||
|
* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e )
|
||||||
|
|
||||||
|
## [1.4.7] - 2018-01-09
|
||||||
|
|
||||||
|
* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
|
||||||
|
* Tests: Fix missing verb on format string (thanks @rchiossi)
|
||||||
|
* Linux: Fix deadlock in Remove (thanks @aarondl)
|
||||||
|
* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
|
||||||
|
* Docs: Moved FAQ into the README (thanks @vahe)
|
||||||
|
* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
|
||||||
|
* Docs: replace references to OS X with macOS
|
||||||
|
|
||||||
|
## [1.4.2] - 2016-10-10
|
||||||
|
|
||||||
|
* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
|
||||||
|
|
||||||
|
## [1.4.1] - 2016-10-04
|
||||||
|
|
||||||
|
* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
|
||||||
|
|
||||||
|
## [1.4.0] - 2016-10-01
|
||||||
|
|
||||||
|
* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
|
||||||
|
|
||||||
|
## [1.3.1] - 2016-06-28
|
||||||
|
|
||||||
|
* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
|
||||||
|
|
||||||
|
## [1.3.0] - 2016-04-19
|
||||||
|
|
||||||
|
* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
|
||||||
|
|
||||||
|
## [1.2.10] - 2016-03-02
|
||||||
|
|
||||||
|
* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
|
||||||
|
|
||||||
|
## [1.2.9] - 2016-01-13
|
||||||
|
|
||||||
|
kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
|
||||||
|
|
||||||
|
## [1.2.8] - 2015-12-17
|
||||||
|
|
||||||
|
* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
|
||||||
|
* inotify: fix race in test
|
||||||
|
* enable race detection for continuous integration (Linux, Mac, Windows)
|
||||||
|
|
||||||
|
## [1.2.5] - 2015-10-17
|
||||||
|
|
||||||
|
* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
|
||||||
|
* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
|
||||||
|
* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
|
||||||
|
* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
|
||||||
|
|
||||||
|
## [1.2.1] - 2015-10-14
|
||||||
|
|
||||||
|
* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
|
||||||
|
|
||||||
|
## [1.2.0] - 2015-02-08
|
||||||
|
|
||||||
|
* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
|
||||||
|
* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
|
||||||
|
* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
|
||||||
|
|
||||||
|
## [1.1.1] - 2015-02-05
|
||||||
|
|
||||||
|
* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
|
||||||
|
|
||||||
|
## [1.1.0] - 2014-12-12
|
||||||
|
|
||||||
|
* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
|
||||||
|
* add low-level functions
|
||||||
|
* only need to store flags on directories
|
||||||
|
* less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
|
||||||
|
* done can be an unbuffered channel
|
||||||
|
* remove calls to os.NewSyscallError
|
||||||
|
* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
|
||||||
|
* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
|
||||||
|
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
|
||||||
|
|
||||||
|
## [1.0.4] - 2014-09-07
|
||||||
|
|
||||||
|
* kqueue: add dragonfly to the build tags.
|
||||||
|
* Rename source code files, rearrange code so exported APIs are at the top.
|
||||||
|
* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
|
||||||
|
|
||||||
|
## [1.0.3] - 2014-08-19
|
||||||
|
|
||||||
|
* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
|
||||||
|
|
||||||
|
## [1.0.2] - 2014-08-17
|
||||||
|
|
||||||
|
* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||||
|
* [Fix] Make ./path and path equivalent. (thanks @zhsso)
|
||||||
|
|
||||||
|
## [1.0.0] - 2014-08-15
|
||||||
|
|
||||||
|
* [API] Remove AddWatch on Windows, use Add.
|
||||||
|
* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
|
||||||
|
* Minor updates based on feedback from golint.
|
||||||
|
|
||||||
|
## dev / 2014-07-09
|
||||||
|
|
||||||
|
* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
|
||||||
|
* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
|
||||||
|
|
||||||
|
## dev / 2014-07-04
|
||||||
|
|
||||||
|
* kqueue: fix incorrect mutex used in Close()
|
||||||
|
* Update example to demonstrate usage of Op.
|
||||||
|
|
||||||
|
## dev / 2014-06-28
|
||||||
|
|
||||||
|
* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
|
||||||
|
* Fix for String() method on Event (thanks Alex Brainman)
|
||||||
|
* Don't build on Plan 9 or Solaris (thanks @4ad)
|
||||||
|
|
||||||
|
## dev / 2014-06-21
|
||||||
|
|
||||||
|
* Events channel of type Event rather than *Event.
|
||||||
|
* [internal] use syscall constants directly for inotify and kqueue.
|
||||||
|
* [internal] kqueue: rename events to kevents and fileEvent to event.
|
||||||
|
|
||||||
|
## dev / 2014-06-19
|
||||||
|
|
||||||
|
* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
|
||||||
|
* [internal] remove cookie from Event struct (unused).
|
||||||
|
* [internal] Event struct has the same definition across every OS.
|
||||||
|
* [internal] remove internal watch and removeWatch methods.
|
||||||
|
|
||||||
|
## dev / 2014-06-12
|
||||||
|
|
||||||
|
* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
|
||||||
|
* [API] Pluralized channel names: Events and Errors.
|
||||||
|
* [API] Renamed FileEvent struct to Event.
|
||||||
|
* [API] Op constants replace methods like IsCreate().
|
||||||
|
|
||||||
|
## dev / 2014-06-12
|
||||||
|
|
||||||
|
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||||
|
|
||||||
|
## dev / 2014-05-23
|
||||||
|
|
||||||
|
* [API] Remove current implementation of WatchFlags.
|
||||||
|
* current implementation doesn't take advantage of OS for efficiency
|
||||||
|
* provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
|
||||||
|
* no tests for the current implementation
|
||||||
|
* not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
|
||||||
|
|
||||||
|
## [0.9.3] - 2014-12-31
|
||||||
|
|
||||||
|
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
|
||||||
|
|
||||||
|
## [0.9.2] - 2014-08-17
|
||||||
|
|
||||||
|
* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||||
|
|
||||||
|
## [0.9.1] - 2014-06-12
|
||||||
|
|
||||||
|
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||||
|
|
||||||
|
## [0.9.0] - 2014-01-17
|
||||||
|
|
||||||
|
* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
|
||||||
|
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
|
||||||
|
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
|
||||||
|
|
||||||
|
## [0.8.12] - 2013-11-13
|
||||||
|
|
||||||
|
* [API] Remove FD_SET and friends from Linux adapter
|
||||||
|
|
||||||
|
## [0.8.11] - 2013-11-02
|
||||||
|
|
||||||
|
* [Doc] Add Changelog [#72][] (thanks @nathany)
|
||||||
|
* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
|
||||||
|
|
||||||
|
## [0.8.10] - 2013-10-19
|
||||||
|
|
||||||
|
* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
|
||||||
|
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
|
||||||
|
* [Doc] specify OS-specific limits in README (thanks @debrando)
|
||||||
|
|
||||||
|
## [0.8.9] - 2013-09-08
|
||||||
|
|
||||||
|
* [Doc] Contributing (thanks @nathany)
|
||||||
|
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
|
||||||
|
* [Doc] GoCI badge in README (Linux only) [#60][]
|
||||||
|
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
|
||||||
|
|
||||||
|
## [0.8.8] - 2013-06-17
|
||||||
|
|
||||||
|
* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
|
||||||
|
|
||||||
|
## [0.8.7] - 2013-06-03
|
||||||
|
|
||||||
|
* [API] Make syscall flags internal
|
||||||
|
* [Fix] inotify: ignore event changes
|
||||||
|
* [Fix] race in symlink test [#45][] (reported by @srid)
|
||||||
|
* [Fix] tests on Windows
|
||||||
|
* lower case error messages
|
||||||
|
|
||||||
|
## [0.8.6] - 2013-05-23
|
||||||
|
|
||||||
|
* kqueue: Use EVT_ONLY flag on Darwin
|
||||||
|
* [Doc] Update README with full example
|
||||||
|
|
||||||
|
## [0.8.5] - 2013-05-09
|
||||||
|
|
||||||
|
* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
|
||||||
|
|
||||||
|
## [0.8.4] - 2013-04-07
|
||||||
|
|
||||||
|
* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
|
||||||
|
|
||||||
|
## [0.8.3] - 2013-03-13
|
||||||
|
|
||||||
|
* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin)
|
||||||
|
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
|
||||||
|
|
||||||
|
## [0.8.2] - 2013-02-07
|
||||||
|
|
||||||
|
* [Doc] add Authors
|
||||||
|
* [Fix] fix data races for map access [#29][] (thanks @fsouza)
|
||||||
|
|
||||||
|
## [0.8.1] - 2013-01-09
|
||||||
|
|
||||||
|
* [Fix] Windows path separators
|
||||||
|
* [Doc] BSD License
|
||||||
|
|
||||||
|
## [0.8.0] - 2012-11-09
|
||||||
|
|
||||||
|
* kqueue: directory watching improvements (thanks @vmirage)
|
||||||
|
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
|
||||||
|
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
|
||||||
|
|
||||||
|
## [0.7.4] - 2012-10-09
|
||||||
|
|
||||||
|
* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
|
||||||
|
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
|
||||||
|
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
|
||||||
|
* [Fix] kqueue: modify after recreation of file
|
||||||
|
|
||||||
|
## [0.7.3] - 2012-09-27
|
||||||
|
|
||||||
|
* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
|
||||||
|
* [Fix] kqueue: no longer get duplicate CREATE events
|
||||||
|
|
||||||
|
## [0.7.2] - 2012-09-01
|
||||||
|
|
||||||
|
* kqueue: events for created directories
|
||||||
|
|
||||||
|
## [0.7.1] - 2012-07-14
|
||||||
|
|
||||||
|
* [Fix] for renaming files
|
||||||
|
|
||||||
|
## [0.7.0] - 2012-07-02
|
||||||
|
|
||||||
|
* [Feature] FSNotify flags
|
||||||
|
* [Fix] inotify: Added file name back to event path
|
||||||
|
|
||||||
|
## [0.6.0] - 2012-06-06
|
||||||
|
|
||||||
|
* kqueue: watch files after directory created (thanks @tmc)
|
||||||
|
|
||||||
|
## [0.5.1] - 2012-05-22
|
||||||
|
|
||||||
|
* [Fix] inotify: remove all watches before Close()
|
||||||
|
|
||||||
|
## [0.5.0] - 2012-05-03
|
||||||
|
|
||||||
|
* [API] kqueue: return errors during watch instead of sending over channel
|
||||||
|
* kqueue: match symlink behavior on Linux
|
||||||
|
* inotify: add `DELETE_SELF` (requested by @taralx)
|
||||||
|
* [Fix] kqueue: handle EINTR (reported by @robfig)
|
||||||
|
* [Doc] Godoc example [#1][] (thanks @davecheney)
|
||||||
|
|
||||||
|
## [0.4.0] - 2012-03-30
|
||||||
|
|
||||||
|
* Go 1 released: build with go tool
|
||||||
|
* [Feature] Windows support using winfsnotify
|
||||||
|
* Windows does not have attribute change notifications
|
||||||
|
* Roll attribute notifications into IsModify
|
||||||
|
|
||||||
|
## [0.3.0] - 2012-02-19
|
||||||
|
|
||||||
|
* kqueue: add files when watch directory
|
||||||
|
|
||||||
|
## [0.2.0] - 2011-12-30
|
||||||
|
|
||||||
|
* update to latest Go weekly code
|
||||||
|
|
||||||
|
## [0.1.0] - 2011-10-19
|
||||||
|
|
||||||
|
* kqueue: add watch on file creation to match inotify
|
||||||
|
* kqueue: create file event
|
||||||
|
* inotify: ignore `IN_IGNORED` events
|
||||||
|
* event String()
|
||||||
|
* linux: common FileEvent functions
|
||||||
|
* initial commit
|
||||||
|
|
||||||
|
[#79]: https://github.com/howeyc/fsnotify/pull/79
|
||||||
|
[#77]: https://github.com/howeyc/fsnotify/pull/77
|
||||||
|
[#72]: https://github.com/howeyc/fsnotify/issues/72
|
||||||
|
[#71]: https://github.com/howeyc/fsnotify/issues/71
|
||||||
|
[#70]: https://github.com/howeyc/fsnotify/issues/70
|
||||||
|
[#63]: https://github.com/howeyc/fsnotify/issues/63
|
||||||
|
[#62]: https://github.com/howeyc/fsnotify/issues/62
|
||||||
|
[#60]: https://github.com/howeyc/fsnotify/issues/60
|
||||||
|
[#59]: https://github.com/howeyc/fsnotify/issues/59
|
||||||
|
[#49]: https://github.com/howeyc/fsnotify/issues/49
|
||||||
|
[#45]: https://github.com/howeyc/fsnotify/issues/45
|
||||||
|
[#40]: https://github.com/howeyc/fsnotify/issues/40
|
||||||
|
[#36]: https://github.com/howeyc/fsnotify/issues/36
|
||||||
|
[#33]: https://github.com/howeyc/fsnotify/issues/33
|
||||||
|
[#29]: https://github.com/howeyc/fsnotify/issues/29
|
||||||
|
[#25]: https://github.com/howeyc/fsnotify/issues/25
|
||||||
|
[#24]: https://github.com/howeyc/fsnotify/issues/24
|
||||||
|
[#21]: https://github.com/howeyc/fsnotify/issues/21
|
||||||
145
vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
generated
vendored
Normal file
145
vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
generated
vendored
Normal file
@@ -0,0 +1,145 @@
|
|||||||
|
Thank you for your interest in contributing to fsnotify! We try to review and
|
||||||
|
merge PRs in a reasonable timeframe, but please be aware that:
|
||||||
|
|
||||||
|
- To avoid "wasted" work, please discuss changes on the issue tracker first. You
|
||||||
|
can just send PRs, but they may end up being rejected for one reason or the
|
||||||
|
other.
|
||||||
|
|
||||||
|
- fsnotify is a cross-platform library, and changes must work reasonably well on
|
||||||
|
all supported platforms.
|
||||||
|
|
||||||
|
- Changes will need to be compatible; old code should still compile, and the
|
||||||
|
runtime behaviour can't change in ways that are likely to lead to problems for
|
||||||
|
users.
|
||||||
|
|
||||||
|
Testing
|
||||||
|
-------
|
||||||
|
Just `go test ./...` runs all the tests; the CI runs this on all supported
|
||||||
|
platforms. Testing different platforms locally can be done with something like
|
||||||
|
[goon] or [Vagrant], but this isn't super-easy to set up at the moment.
|
||||||
|
|
||||||
|
Use the `-short` flag to make the "stress test" run faster.
|
||||||
|
|
||||||
|
Writing new tests
|
||||||
|
-----------------
|
||||||
|
Scripts in the testdata directory allow creating test cases in a "shell-like"
|
||||||
|
syntax. The basic format is:
|
||||||
|
|
||||||
|
script
|
||||||
|
|
||||||
|
Output:
|
||||||
|
desired output
|
||||||
|
|
||||||
|
For example:
|
||||||
|
|
||||||
|
# Create a new empty file with some data.
|
||||||
|
watch /
|
||||||
|
echo data >/file
|
||||||
|
|
||||||
|
Output:
|
||||||
|
create /file
|
||||||
|
write /file
|
||||||
|
|
||||||
|
Just create a new file to add a new test; select which tests to run with
|
||||||
|
`-run TestScript/[path]`.
|
||||||
|
|
||||||
|
script
|
||||||
|
------
|
||||||
|
The script is a "shell-like" script:
|
||||||
|
|
||||||
|
cmd arg arg
|
||||||
|
|
||||||
|
Comments are supported with `#`:
|
||||||
|
|
||||||
|
# Comment
|
||||||
|
cmd arg arg # Comment
|
||||||
|
|
||||||
|
All operations are done in a temp directory; a path like "/foo" is rewritten to
|
||||||
|
"/tmp/TestFoo/foo".
|
||||||
|
|
||||||
|
Arguments can be quoted with `"` or `'`; there are no escapes and they're
|
||||||
|
functionally identical right now, but this may change in the future, so best to
|
||||||
|
assume shell-like rules.
|
||||||
|
|
||||||
|
touch "/file with spaces"
|
||||||
|
|
||||||
|
End-of-line escapes with `\` are not supported.
|
||||||
|
|
||||||
|
### Supported commands
|
||||||
|
|
||||||
|
watch path [ops] # Watch the path, reporting events for it. Nothing is
|
||||||
|
# watched by default. Optionally a list of ops can be
|
||||||
|
# given, as with AddWith(path, WithOps(...)).
|
||||||
|
unwatch path # Stop watching the path.
|
||||||
|
watchlist n # Assert watchlist length.
|
||||||
|
|
||||||
|
stop # Stop running the script; for debugging.
|
||||||
|
debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in
|
||||||
|
parallel by default, so -parallel=1 is probably a good
|
||||||
|
idea).
|
||||||
|
print [any strings] # Print text to stdout; for debugging.
|
||||||
|
|
||||||
|
touch path
|
||||||
|
mkdir [-p] dir
|
||||||
|
ln -s target link # Only ln -s supported.
|
||||||
|
mkfifo path
|
||||||
|
mknod dev path
|
||||||
|
mv src dst
|
||||||
|
rm [-r] path
|
||||||
|
chmod mode path # Octal only
|
||||||
|
sleep time-in-ms
|
||||||
|
|
||||||
|
cat path # Read path (does nothing with the data; just reads it).
|
||||||
|
echo str >>path # Append "str" to "path".
|
||||||
|
echo str >path # Truncate "path" and write "str".
|
||||||
|
|
||||||
|
require reason # Skip the test if "reason" is true; "skip" and
|
||||||
|
skip reason # "require" behave identical; it supports both for
|
||||||
|
# readability. Possible reasons are:
|
||||||
|
#
|
||||||
|
# always Always skip this test.
|
||||||
|
# symlink Symlinks are supported (requires admin
|
||||||
|
# permissions on Windows).
|
||||||
|
# mkfifo Platform doesn't support FIFO named sockets.
|
||||||
|
# mknod Platform doesn't support device nodes.
|
||||||
|
|
||||||
|
|
||||||
|
output
|
||||||
|
------
|
||||||
|
After `Output:` the desired output is given; this is indented by convention, but
|
||||||
|
that's not required.
|
||||||
|
|
||||||
|
The format of that is:
|
||||||
|
|
||||||
|
# Comment
|
||||||
|
event path # Comment
|
||||||
|
|
||||||
|
system:
|
||||||
|
event path
|
||||||
|
system2:
|
||||||
|
event path
|
||||||
|
|
||||||
|
Every event is one line, and any whitespace between the event and path are
|
||||||
|
ignored. The path can optionally be surrounded in ". Anything after a "#" is
|
||||||
|
ignored.
|
||||||
|
|
||||||
|
Platform-specific tests can be added after GOOS; for example:
|
||||||
|
|
||||||
|
watch /
|
||||||
|
touch /file
|
||||||
|
|
||||||
|
Output:
|
||||||
|
# Tested if nothing else matches
|
||||||
|
create /file
|
||||||
|
|
||||||
|
# Windows-specific test.
|
||||||
|
windows:
|
||||||
|
write /file
|
||||||
|
|
||||||
|
You can specify multiple platforms with a comma (e.g. "windows, linux:").
|
||||||
|
"kqueue" is a shortcut for all kqueue systems (BSD, macOS).
|
||||||
|
|
||||||
|
|
||||||
|
[goon]: https://github.com/arp242/goon
|
||||||
|
[Vagrant]: https://www.vagrantup.com/
|
||||||
|
[integration_test.go]: /integration_test.go
|
||||||
25
vendor/github.com/fsnotify/fsnotify/LICENSE
generated
vendored
Normal file
25
vendor/github.com/fsnotify/fsnotify/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
Copyright © 2012 The Go Authors. All rights reserved.
|
||||||
|
Copyright © fsnotify Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without modification,
|
||||||
|
are permitted provided that the following conditions are met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright notice, this
|
||||||
|
list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above copyright notice, this
|
||||||
|
list of conditions and the following disclaimer in the documentation and/or
|
||||||
|
other materials provided with the distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its contributors may be used
|
||||||
|
to endorse or promote products derived from this software without specific
|
||||||
|
prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||||
|
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||||
|
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||||
|
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||||
|
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||||
|
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||||
|
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||||
|
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
182
vendor/github.com/fsnotify/fsnotify/README.md
generated
vendored
Normal file
182
vendor/github.com/fsnotify/fsnotify/README.md
generated
vendored
Normal file
@@ -0,0 +1,182 @@
|
|||||||
|
fsnotify is a Go library to provide cross-platform filesystem notifications on
|
||||||
|
Windows, Linux, macOS, BSD, and illumos.
|
||||||
|
|
||||||
|
Go 1.17 or newer is required; the full documentation is at
|
||||||
|
https://pkg.go.dev/github.com/fsnotify/fsnotify
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
Platform support:
|
||||||
|
|
||||||
|
| Backend | OS | Status |
|
||||||
|
| :-------------------- | :--------- | :------------------------------------------------------------------------ |
|
||||||
|
| inotify | Linux | Supported |
|
||||||
|
| kqueue | BSD, macOS | Supported |
|
||||||
|
| ReadDirectoryChangesW | Windows | Supported |
|
||||||
|
| FEN | illumos | Supported |
|
||||||
|
| fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) |
|
||||||
|
| FSEvents | macOS | [Needs support in x/sys/unix][fsevents] |
|
||||||
|
| USN Journals | Windows | [Needs support in x/sys/windows][usn] |
|
||||||
|
| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) |
|
||||||
|
|
||||||
|
Linux and illumos should include Android and Solaris, but these are currently
|
||||||
|
untested.
|
||||||
|
|
||||||
|
[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120
|
||||||
|
[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847
|
||||||
|
|
||||||
|
Usage
|
||||||
|
-----
|
||||||
|
A basic example:
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/fsnotify/fsnotify"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// Create new watcher.
|
||||||
|
watcher, err := fsnotify.NewWatcher()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
defer watcher.Close()
|
||||||
|
|
||||||
|
// Start listening for events.
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case event, ok := <-watcher.Events:
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
log.Println("event:", event)
|
||||||
|
if event.Has(fsnotify.Write) {
|
||||||
|
log.Println("modified file:", event.Name)
|
||||||
|
}
|
||||||
|
case err, ok := <-watcher.Errors:
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
log.Println("error:", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Add a path.
|
||||||
|
err = watcher.Add("/tmp")
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Block main goroutine forever.
|
||||||
|
<-make(chan struct{})
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be
|
||||||
|
run with:
|
||||||
|
|
||||||
|
% go run ./cmd/fsnotify
|
||||||
|
|
||||||
|
Further detailed documentation can be found in godoc:
|
||||||
|
https://pkg.go.dev/github.com/fsnotify/fsnotify
|
||||||
|
|
||||||
|
FAQ
|
||||||
|
---
|
||||||
|
### Will a file still be watched when it's moved to another directory?
|
||||||
|
No, not unless you are watching the location it was moved to.
|
||||||
|
|
||||||
|
### Are subdirectories watched?
|
||||||
|
No, you must add watches for any directory you want to watch (a recursive
|
||||||
|
watcher is on the roadmap: [#18]).
|
||||||
|
|
||||||
|
[#18]: https://github.com/fsnotify/fsnotify/issues/18
|
||||||
|
|
||||||
|
### Do I have to watch the Error and Event channels in a goroutine?
|
||||||
|
Yes. You can read both channels in the same goroutine using `select` (you don't
|
||||||
|
need a separate goroutine for both channels; see the example).
|
||||||
|
|
||||||
|
### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys?
|
||||||
|
fsnotify requires support from underlying OS to work. The current NFS and SMB
|
||||||
|
protocols does not provide network level support for file notifications, and
|
||||||
|
neither do the /proc and /sys virtual filesystems.
|
||||||
|
|
||||||
|
This could be fixed with a polling watcher ([#9]), but it's not yet implemented.
|
||||||
|
|
||||||
|
[#9]: https://github.com/fsnotify/fsnotify/issues/9
|
||||||
|
|
||||||
|
### Why do I get many Chmod events?
|
||||||
|
Some programs may generate a lot of attribute changes; for example Spotlight on
|
||||||
|
macOS, anti-virus programs, backup applications, and some others are known to do
|
||||||
|
this. As a rule, it's typically best to ignore Chmod events. They're often not
|
||||||
|
useful, and tend to cause problems.
|
||||||
|
|
||||||
|
Spotlight indexing on macOS can result in multiple events (see [#15]). A
|
||||||
|
temporary workaround is to add your folder(s) to the *Spotlight Privacy
|
||||||
|
settings* until we have a native FSEvents implementation (see [#11]).
|
||||||
|
|
||||||
|
[#11]: https://github.com/fsnotify/fsnotify/issues/11
|
||||||
|
[#15]: https://github.com/fsnotify/fsnotify/issues/15
|
||||||
|
|
||||||
|
### Watching a file doesn't work well
|
||||||
|
Watching individual files (rather than directories) is generally not recommended
|
||||||
|
as many programs (especially editors) update files atomically: it will write to
|
||||||
|
a temporary file which is then moved to to destination, overwriting the original
|
||||||
|
(or some variant thereof). The watcher on the original file is now lost, as that
|
||||||
|
no longer exists.
|
||||||
|
|
||||||
|
The upshot of this is that a power failure or crash won't leave a half-written
|
||||||
|
file.
|
||||||
|
|
||||||
|
Watch the parent directory and use `Event.Name` to filter out files you're not
|
||||||
|
interested in. There is an example of this in `cmd/fsnotify/file.go`.
|
||||||
|
|
||||||
|
Platform-specific notes
|
||||||
|
-----------------------
|
||||||
|
### Linux
|
||||||
|
When a file is removed a REMOVE event won't be emitted until all file
|
||||||
|
descriptors are closed; it will emit a CHMOD instead:
|
||||||
|
|
||||||
|
fp := os.Open("file")
|
||||||
|
os.Remove("file") // CHMOD
|
||||||
|
fp.Close() // REMOVE
|
||||||
|
|
||||||
|
This is the event that inotify sends, so not much can be changed about this.
|
||||||
|
|
||||||
|
The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for
|
||||||
|
the number of watches per user, and `fs.inotify.max_user_instances` specifies
|
||||||
|
the maximum number of inotify instances per user. Every Watcher you create is an
|
||||||
|
"instance", and every path you add is a "watch".
|
||||||
|
|
||||||
|
These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and
|
||||||
|
`/proc/sys/fs/inotify/max_user_instances`
|
||||||
|
|
||||||
|
To increase them you can use `sysctl` or write the value to proc file:
|
||||||
|
|
||||||
|
# The default values on Linux 5.18
|
||||||
|
sysctl fs.inotify.max_user_watches=124983
|
||||||
|
sysctl fs.inotify.max_user_instances=128
|
||||||
|
|
||||||
|
To make the changes persist on reboot edit `/etc/sysctl.conf` or
|
||||||
|
`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your
|
||||||
|
distro's documentation):
|
||||||
|
|
||||||
|
fs.inotify.max_user_watches=124983
|
||||||
|
fs.inotify.max_user_instances=128
|
||||||
|
|
||||||
|
Reaching the limit will result in a "no space left on device" or "too many open
|
||||||
|
files" error.
|
||||||
|
|
||||||
|
### kqueue (macOS, all BSD systems)
|
||||||
|
kqueue requires opening a file descriptor for every file that's being watched;
|
||||||
|
so if you're watching a directory with five files then that's six file
|
||||||
|
descriptors. You will run in to your system's "max open files" limit faster on
|
||||||
|
these platforms.
|
||||||
|
|
||||||
|
The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to
|
||||||
|
control the maximum number of open files.
|
||||||
467
vendor/github.com/fsnotify/fsnotify/backend_fen.go
generated
vendored
Normal file
467
vendor/github.com/fsnotify/fsnotify/backend_fen.go
generated
vendored
Normal file
@@ -0,0 +1,467 @@
|
|||||||
|
//go:build solaris
|
||||||
|
|
||||||
|
// FEN backend for illumos (supported) and Solaris (untested, but should work).
|
||||||
|
//
|
||||||
|
// See port_create(3c) etc. for docs. https://www.illumos.org/man/3C/port_create
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io/fs"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/fsnotify/fsnotify/internal"
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
type fen struct {
|
||||||
|
*shared
|
||||||
|
Events chan Event
|
||||||
|
Errors chan error
|
||||||
|
|
||||||
|
mu sync.Mutex
|
||||||
|
port *unix.EventPort
|
||||||
|
dirs map[string]Op // Explicitly watched directories
|
||||||
|
watches map[string]Op // Explicitly watched non-directories
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultBufferSize = 0
|
||||||
|
|
||||||
|
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||||
|
w := &fen{
|
||||||
|
shared: newShared(ev, errs),
|
||||||
|
Events: ev,
|
||||||
|
Errors: errs,
|
||||||
|
dirs: make(map[string]Op),
|
||||||
|
watches: make(map[string]Op),
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
w.port, err = unix.NewEventPort()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("fsnotify.NewWatcher: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
go w.readEvents()
|
||||||
|
return w, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *fen) Close() error {
|
||||||
|
if w.shared.close() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return w.port.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *fen) Add(name string) error { return w.AddWith(name) }
|
||||||
|
|
||||||
|
func (w *fen) AddWith(name string, opts ...addOpt) error {
|
||||||
|
if w.isClosed() {
|
||||||
|
return ErrClosed
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
|
||||||
|
time.Now().Format("15:04:05.000000000"), name)
|
||||||
|
}
|
||||||
|
|
||||||
|
with := getOptions(opts...)
|
||||||
|
if !w.xSupports(with.op) {
|
||||||
|
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Currently we resolve symlinks that were explicitly requested to be
|
||||||
|
// watched. Otherwise we would use LStat here.
|
||||||
|
stat, err := os.Stat(name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Associate all files in the directory.
|
||||||
|
if stat.IsDir() {
|
||||||
|
err := w.handleDirectory(name, stat, true, w.associateFile)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
w.dirs[name] = with.op
|
||||||
|
w.mu.Unlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
err = w.associateFile(name, stat, true)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
w.watches[name] = with.op
|
||||||
|
w.mu.Unlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *fen) Remove(name string) error {
|
||||||
|
if w.isClosed() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if !w.port.PathIsWatched(name) {
|
||||||
|
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
|
||||||
|
time.Now().Format("15:04:05.000000000"), name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The user has expressed an intent. Immediately remove this name from
|
||||||
|
// whichever watch list it might be in. If it's not in there the delete
|
||||||
|
// doesn't cause harm.
|
||||||
|
w.mu.Lock()
|
||||||
|
delete(w.watches, name)
|
||||||
|
delete(w.dirs, name)
|
||||||
|
w.mu.Unlock()
|
||||||
|
|
||||||
|
stat, err := os.Stat(name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove associations for every file in the directory.
|
||||||
|
if stat.IsDir() {
|
||||||
|
err := w.handleDirectory(name, stat, false, w.dissociateFile)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
err = w.port.DissociatePath(name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// readEvents contains the main loop that runs in a goroutine watching for events.
|
||||||
|
func (w *fen) readEvents() {
|
||||||
|
// If this function returns, the watcher has been closed and we can close
|
||||||
|
// these channels
|
||||||
|
defer func() {
|
||||||
|
close(w.Errors)
|
||||||
|
close(w.Events)
|
||||||
|
}()
|
||||||
|
|
||||||
|
pevents := make([]unix.PortEvent, 8)
|
||||||
|
for {
|
||||||
|
count, err := w.port.Get(pevents, 1, nil)
|
||||||
|
if err != nil && err != unix.ETIME {
|
||||||
|
// Interrupted system call (count should be 0) ignore and continue
|
||||||
|
if errors.Is(err, unix.EINTR) && count == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Get failed because we called w.Close()
|
||||||
|
if errors.Is(err, unix.EBADF) && w.isClosed() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// There was an error not caused by calling w.Close()
|
||||||
|
if !w.sendError(fmt.Errorf("port.Get: %w", err)) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
p := pevents[:count]
|
||||||
|
for _, pevent := range p {
|
||||||
|
if pevent.Source != unix.PORT_SOURCE_FILE {
|
||||||
|
// Event from unexpected source received; should never happen.
|
||||||
|
if !w.sendError(errors.New("Event from unexpected source received")) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if debug {
|
||||||
|
internal.Debug(pevent.Path, pevent.Events)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = w.handleEvent(&pevent)
|
||||||
|
if !w.sendError(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
|
||||||
|
files, err := os.ReadDir(path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle all children of the directory.
|
||||||
|
for _, entry := range files {
|
||||||
|
finfo, err := entry.Info()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = handler(filepath.Join(path, finfo.Name()), finfo, false)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// And finally handle the directory itself.
|
||||||
|
return handler(path, stat, follow)
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleEvent might need to emit more than one fsnotify event if the events
|
||||||
|
// bitmap matches more than one event type (e.g. the file was both modified and
|
||||||
|
// had the attributes changed between when the association was created and the
|
||||||
|
// when event was returned)
|
||||||
|
func (w *fen) handleEvent(event *unix.PortEvent) error {
|
||||||
|
var (
|
||||||
|
events = event.Events
|
||||||
|
path = event.Path
|
||||||
|
fmode = event.Cookie.(os.FileMode)
|
||||||
|
reRegister = true
|
||||||
|
)
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
_, watchedDir := w.dirs[path]
|
||||||
|
_, watchedPath := w.watches[path]
|
||||||
|
w.mu.Unlock()
|
||||||
|
isWatched := watchedDir || watchedPath
|
||||||
|
|
||||||
|
if events&unix.FILE_DELETE != 0 {
|
||||||
|
if !w.sendEvent(Event{Name: path, Op: Remove}) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
reRegister = false
|
||||||
|
}
|
||||||
|
if events&unix.FILE_RENAME_FROM != 0 {
|
||||||
|
if !w.sendEvent(Event{Name: path, Op: Rename}) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Don't keep watching the new file name
|
||||||
|
reRegister = false
|
||||||
|
}
|
||||||
|
if events&unix.FILE_RENAME_TO != 0 {
|
||||||
|
// We don't report a Rename event for this case, because Rename events
|
||||||
|
// are interpreted as referring to the _old_ name of the file, and in
|
||||||
|
// this case the event would refer to the new name of the file. This
|
||||||
|
// type of rename event is not supported by fsnotify.
|
||||||
|
|
||||||
|
// inotify reports a Remove event in this case, so we simulate this
|
||||||
|
// here.
|
||||||
|
if !w.sendEvent(Event{Name: path, Op: Remove}) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Don't keep watching the file that was removed
|
||||||
|
reRegister = false
|
||||||
|
}
|
||||||
|
|
||||||
|
// The file is gone, nothing left to do.
|
||||||
|
if !reRegister {
|
||||||
|
if watchedDir {
|
||||||
|
w.mu.Lock()
|
||||||
|
delete(w.dirs, path)
|
||||||
|
w.mu.Unlock()
|
||||||
|
}
|
||||||
|
if watchedPath {
|
||||||
|
w.mu.Lock()
|
||||||
|
delete(w.watches, path)
|
||||||
|
w.mu.Unlock()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we didn't get a deletion the file still exists and we're going to have
|
||||||
|
// to watch it again. Let's Stat it now so that we can compare permissions
|
||||||
|
// and have what we need to continue watching the file
|
||||||
|
|
||||||
|
stat, err := os.Lstat(path)
|
||||||
|
if err != nil {
|
||||||
|
// This is unexpected, but we should still emit an event. This happens
|
||||||
|
// most often on "rm -r" of a subdirectory inside a watched directory We
|
||||||
|
// get a modify event of something happening inside, but by the time we
|
||||||
|
// get here, the sudirectory is already gone. Clearly we were watching
|
||||||
|
// this path but now it is gone. Let's tell the user that it was
|
||||||
|
// removed.
|
||||||
|
if !w.sendEvent(Event{Name: path, Op: Remove}) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Suppress extra write events on removed directories; they are not
|
||||||
|
// informative and can be confusing.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// resolve symlinks that were explicitly watched as we would have at Add()
|
||||||
|
// time. this helps suppress spurious Chmod events on watched symlinks
|
||||||
|
if isWatched {
|
||||||
|
stat, err = os.Stat(path)
|
||||||
|
if err != nil {
|
||||||
|
// The symlink still exists, but the target is gone. Report the
|
||||||
|
// Remove similar to above.
|
||||||
|
if !w.sendEvent(Event{Name: path, Op: Remove}) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Don't return the error
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if events&unix.FILE_MODIFIED != 0 {
|
||||||
|
if fmode.IsDir() && watchedDir {
|
||||||
|
if err := w.updateDirectory(path); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if !w.sendEvent(Event{Name: path, Op: Write}) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if events&unix.FILE_ATTRIB != 0 && stat != nil {
|
||||||
|
// Only send Chmod if perms changed
|
||||||
|
if stat.Mode().Perm() != fmode.Perm() {
|
||||||
|
if !w.sendEvent(Event{Name: path, Op: Chmod}) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if stat != nil {
|
||||||
|
// If we get here, it means we've hit an event above that requires us to
|
||||||
|
// continue watching the file or directory
|
||||||
|
err := w.associateFile(path, stat, isWatched)
|
||||||
|
if errors.Is(err, fs.ErrNotExist) {
|
||||||
|
// Path may have been removed since the stat.
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// The directory was modified, so we must find unwatched entities and watch
|
||||||
|
// them. If something was removed from the directory, nothing will happen, as
|
||||||
|
// everything else should still be watched.
|
||||||
|
func (w *fen) updateDirectory(path string) error {
|
||||||
|
files, err := os.ReadDir(path)
|
||||||
|
if err != nil {
|
||||||
|
// Directory no longer exists: probably just deleted since we got the
|
||||||
|
// event.
|
||||||
|
if errors.Is(err, fs.ErrNotExist) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, entry := range files {
|
||||||
|
path := filepath.Join(path, entry.Name())
|
||||||
|
if w.port.PathIsWatched(path) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
finfo, err := entry.Info()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = w.associateFile(path, finfo, false)
|
||||||
|
if errors.Is(err, fs.ErrNotExist) {
|
||||||
|
// File may have disappeared between getting the dir listing and
|
||||||
|
// adding the port: that's okay to ignore.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !w.sendError(err) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if !w.sendEvent(Event{Name: path, Op: Create}) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error {
|
||||||
|
if w.isClosed() {
|
||||||
|
return ErrClosed
|
||||||
|
}
|
||||||
|
// This is primarily protecting the call to AssociatePath but it is
|
||||||
|
// important and intentional that the call to PathIsWatched is also
|
||||||
|
// protected by this mutex. Without this mutex, AssociatePath has been seen
|
||||||
|
// to error out that the path is already associated.
|
||||||
|
w.mu.Lock()
|
||||||
|
defer w.mu.Unlock()
|
||||||
|
|
||||||
|
if w.port.PathIsWatched(path) {
|
||||||
|
// Remove the old association in favor of this one If we get ENOENT,
|
||||||
|
// then while the x/sys/unix wrapper still thought that this path was
|
||||||
|
// associated, the underlying event port did not. This call will have
|
||||||
|
// cleared up that discrepancy. The most likely cause is that the event
|
||||||
|
// has fired but we haven't processed it yet.
|
||||||
|
err := w.port.DissociatePath(path)
|
||||||
|
if err != nil && !errors.Is(err, unix.ENOENT) {
|
||||||
|
return fmt.Errorf("port.DissociatePath(%q): %w", path, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var events int
|
||||||
|
if !follow {
|
||||||
|
// Watch symlinks themselves rather than their targets unless this entry
|
||||||
|
// is explicitly watched.
|
||||||
|
events |= unix.FILE_NOFOLLOW
|
||||||
|
}
|
||||||
|
if true { // TODO: implement withOps()
|
||||||
|
events |= unix.FILE_MODIFIED
|
||||||
|
}
|
||||||
|
if true {
|
||||||
|
events |= unix.FILE_ATTRIB
|
||||||
|
}
|
||||||
|
err := w.port.AssociatePath(path, stat, events, stat.Mode())
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("port.AssociatePath(%q): %w", path, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error {
|
||||||
|
if !w.port.PathIsWatched(path) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
err := w.port.DissociatePath(path)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("port.DissociatePath(%q): %w", path, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *fen) WatchList() []string {
|
||||||
|
if w.isClosed() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
defer w.mu.Unlock()
|
||||||
|
|
||||||
|
entries := make([]string, 0, len(w.watches)+len(w.dirs))
|
||||||
|
for pathname := range w.dirs {
|
||||||
|
entries = append(entries, pathname)
|
||||||
|
}
|
||||||
|
for pathname := range w.watches {
|
||||||
|
entries = append(entries, pathname)
|
||||||
|
}
|
||||||
|
|
||||||
|
return entries
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *fen) xSupports(op Op) bool {
|
||||||
|
if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
|
||||||
|
op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
583
vendor/github.com/fsnotify/fsnotify/backend_inotify.go
generated
vendored
Normal file
583
vendor/github.com/fsnotify/fsnotify/backend_inotify.go
generated
vendored
Normal file
@@ -0,0 +1,583 @@
|
|||||||
|
//go:build linux && !appengine
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/fs"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"github.com/fsnotify/fsnotify/internal"
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
type inotify struct {
|
||||||
|
*shared
|
||||||
|
Events chan Event
|
||||||
|
Errors chan error
|
||||||
|
|
||||||
|
// Store fd here as os.File.Read() will no longer return on close after
|
||||||
|
// calling Fd(). See: https://github.com/golang/go/issues/26439
|
||||||
|
fd int
|
||||||
|
inotifyFile *os.File
|
||||||
|
watches *watches
|
||||||
|
doneResp chan struct{} // Channel to respond to Close
|
||||||
|
|
||||||
|
// Store rename cookies in an array, with the index wrapping to 0. Almost
|
||||||
|
// all of the time what we get is a MOVED_FROM to set the cookie and the
|
||||||
|
// next event inotify sends will be MOVED_TO to read it. However, this is
|
||||||
|
// not guaranteed – as described in inotify(7) – and we may get other events
|
||||||
|
// between the two MOVED_* events (including other MOVED_* ones).
|
||||||
|
//
|
||||||
|
// A second issue is that moving a file outside the watched directory will
|
||||||
|
// trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to
|
||||||
|
// read and delete it. So just storing it in a map would slowly leak memory.
|
||||||
|
//
|
||||||
|
// Doing it like this gives us a simple fast LRU-cache that won't allocate.
|
||||||
|
// Ten items should be more than enough for our purpose, and a loop over
|
||||||
|
// such a short array is faster than a map access anyway (not that it hugely
|
||||||
|
// matters since we're talking about hundreds of ns at the most, but still).
|
||||||
|
cookies [10]koekje
|
||||||
|
cookieIndex uint8
|
||||||
|
cookiesMu sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
type (
|
||||||
|
watches struct {
|
||||||
|
wd map[uint32]*watch // wd → watch
|
||||||
|
path map[string]uint32 // pathname → wd
|
||||||
|
}
|
||||||
|
watch struct {
|
||||||
|
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||||
|
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||||
|
path string // Watch path.
|
||||||
|
recurse bool // Recursion with ./...?
|
||||||
|
}
|
||||||
|
koekje struct {
|
||||||
|
cookie uint32
|
||||||
|
path string
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
func newWatches() *watches {
|
||||||
|
return &watches{
|
||||||
|
wd: make(map[uint32]*watch),
|
||||||
|
path: make(map[string]uint32),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *watches) byPath(path string) *watch { return w.wd[w.path[path]] }
|
||||||
|
func (w *watches) byWd(wd uint32) *watch { return w.wd[wd] }
|
||||||
|
func (w *watches) len() int { return len(w.wd) }
|
||||||
|
func (w *watches) add(ww *watch) { w.wd[ww.wd] = ww; w.path[ww.path] = ww.wd }
|
||||||
|
func (w *watches) remove(watch *watch) { delete(w.path, watch.path); delete(w.wd, watch.wd) }
|
||||||
|
|
||||||
|
func (w *watches) removePath(path string) ([]uint32, error) {
|
||||||
|
path, recurse := recursivePath(path)
|
||||||
|
wd, ok := w.path[path]
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path)
|
||||||
|
}
|
||||||
|
|
||||||
|
watch := w.wd[wd]
|
||||||
|
if recurse && !watch.recurse {
|
||||||
|
return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path)
|
||||||
|
}
|
||||||
|
|
||||||
|
delete(w.path, path)
|
||||||
|
delete(w.wd, wd)
|
||||||
|
if !watch.recurse {
|
||||||
|
return []uint32{wd}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
wds := make([]uint32, 0, 8)
|
||||||
|
wds = append(wds, wd)
|
||||||
|
for p, rwd := range w.path {
|
||||||
|
if strings.HasPrefix(p, path) {
|
||||||
|
delete(w.path, p)
|
||||||
|
delete(w.wd, rwd)
|
||||||
|
wds = append(wds, rwd)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return wds, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error {
|
||||||
|
var existing *watch
|
||||||
|
wd, ok := w.path[path]
|
||||||
|
if ok {
|
||||||
|
existing = w.wd[wd]
|
||||||
|
}
|
||||||
|
|
||||||
|
upd, err := f(existing)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if upd != nil {
|
||||||
|
w.wd[upd.wd] = upd
|
||||||
|
w.path[upd.path] = upd.wd
|
||||||
|
|
||||||
|
if upd.wd != wd {
|
||||||
|
delete(w.wd, wd)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultBufferSize = 0
|
||||||
|
|
||||||
|
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||||
|
// Need to set nonblocking mode for SetDeadline to work, otherwise blocking
|
||||||
|
// I/O operations won't terminate on close.
|
||||||
|
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
|
||||||
|
if fd == -1 {
|
||||||
|
return nil, errno
|
||||||
|
}
|
||||||
|
|
||||||
|
w := &inotify{
|
||||||
|
shared: newShared(ev, errs),
|
||||||
|
Events: ev,
|
||||||
|
Errors: errs,
|
||||||
|
fd: fd,
|
||||||
|
inotifyFile: os.NewFile(uintptr(fd), ""),
|
||||||
|
watches: newWatches(),
|
||||||
|
doneResp: make(chan struct{}),
|
||||||
|
}
|
||||||
|
|
||||||
|
go w.readEvents()
|
||||||
|
return w, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *inotify) Close() error {
|
||||||
|
if w.shared.close() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Causes any blocking reads to return with an error, provided the file
|
||||||
|
// still supports deadline operations.
|
||||||
|
err := w.inotifyFile.Close()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
<-w.doneResp // Wait for readEvents() to finish.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *inotify) Add(name string) error { return w.AddWith(name) }
|
||||||
|
|
||||||
|
func (w *inotify) AddWith(path string, opts ...addOpt) error {
|
||||||
|
if w.isClosed() {
|
||||||
|
return ErrClosed
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
|
||||||
|
time.Now().Format("15:04:05.000000000"), path)
|
||||||
|
}
|
||||||
|
|
||||||
|
with := getOptions(opts...)
|
||||||
|
if !w.xSupports(with.op) {
|
||||||
|
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
|
||||||
|
}
|
||||||
|
|
||||||
|
add := func(path string, with withOpts, recurse bool) error {
|
||||||
|
var flags uint32
|
||||||
|
if with.noFollow {
|
||||||
|
flags |= unix.IN_DONT_FOLLOW
|
||||||
|
}
|
||||||
|
if with.op.Has(Create) {
|
||||||
|
flags |= unix.IN_CREATE
|
||||||
|
}
|
||||||
|
if with.op.Has(Write) {
|
||||||
|
flags |= unix.IN_MODIFY
|
||||||
|
}
|
||||||
|
if with.op.Has(Remove) {
|
||||||
|
flags |= unix.IN_DELETE | unix.IN_DELETE_SELF
|
||||||
|
}
|
||||||
|
if with.op.Has(Rename) {
|
||||||
|
flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF
|
||||||
|
}
|
||||||
|
if with.op.Has(Chmod) {
|
||||||
|
flags |= unix.IN_ATTRIB
|
||||||
|
}
|
||||||
|
if with.op.Has(xUnportableOpen) {
|
||||||
|
flags |= unix.IN_OPEN
|
||||||
|
}
|
||||||
|
if with.op.Has(xUnportableRead) {
|
||||||
|
flags |= unix.IN_ACCESS
|
||||||
|
}
|
||||||
|
if with.op.Has(xUnportableCloseWrite) {
|
||||||
|
flags |= unix.IN_CLOSE_WRITE
|
||||||
|
}
|
||||||
|
if with.op.Has(xUnportableCloseRead) {
|
||||||
|
flags |= unix.IN_CLOSE_NOWRITE
|
||||||
|
}
|
||||||
|
return w.register(path, flags, recurse)
|
||||||
|
}
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
defer w.mu.Unlock()
|
||||||
|
path, recurse := recursivePath(path)
|
||||||
|
if recurse {
|
||||||
|
return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !d.IsDir() {
|
||||||
|
if root == path {
|
||||||
|
return fmt.Errorf("fsnotify: not a directory: %q", path)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send a Create event when adding new directory from a recursive
|
||||||
|
// watch; this is for "mkdir -p one/two/three". Usually all those
|
||||||
|
// directories will be created before we can set up watchers on the
|
||||||
|
// subdirectories, so only "one" would be sent as a Create event and
|
||||||
|
// not "one/two" and "one/two/three" (inotifywait -r has the same
|
||||||
|
// problem).
|
||||||
|
if with.sendCreate && root != path {
|
||||||
|
w.sendEvent(Event{Name: root, Op: Create})
|
||||||
|
}
|
||||||
|
|
||||||
|
return add(root, with, true)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return add(path, with, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *inotify) register(path string, flags uint32, recurse bool) error {
|
||||||
|
return w.watches.updatePath(path, func(existing *watch) (*watch, error) {
|
||||||
|
if existing != nil {
|
||||||
|
flags |= existing.flags | unix.IN_MASK_ADD
|
||||||
|
}
|
||||||
|
|
||||||
|
wd, err := unix.InotifyAddWatch(w.fd, path, flags)
|
||||||
|
if wd == -1 {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if e, ok := w.watches.wd[uint32(wd)]; ok {
|
||||||
|
return e, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if existing == nil {
|
||||||
|
return &watch{
|
||||||
|
wd: uint32(wd),
|
||||||
|
path: path,
|
||||||
|
flags: flags,
|
||||||
|
recurse: recurse,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
existing.wd = uint32(wd)
|
||||||
|
existing.flags = flags
|
||||||
|
return existing, nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *inotify) Remove(name string) error {
|
||||||
|
if w.isClosed() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
|
||||||
|
time.Now().Format("15:04:05.000000000"), name)
|
||||||
|
}
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
defer w.mu.Unlock()
|
||||||
|
return w.remove(filepath.Clean(name))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *inotify) remove(name string) error {
|
||||||
|
wds, err := w.watches.removePath(name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, wd := range wds {
|
||||||
|
_, err := unix.InotifyRmWatch(w.fd, wd)
|
||||||
|
if err != nil {
|
||||||
|
// TODO: Perhaps it's not helpful to return an error here in every
|
||||||
|
// case; the only two possible errors are:
|
||||||
|
//
|
||||||
|
// EBADF, which happens when w.fd is not a valid file descriptor of
|
||||||
|
// any kind.
|
||||||
|
//
|
||||||
|
// EINVAL, which is when fd is not an inotify descriptor or wd is
|
||||||
|
// not a valid watch descriptor. Watch descriptors are invalidated
|
||||||
|
// when they are removed explicitly or implicitly; explicitly by
|
||||||
|
// inotify_rm_watch, implicitly when the file they are watching is
|
||||||
|
// deleted.
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *inotify) WatchList() []string {
|
||||||
|
if w.isClosed() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
defer w.mu.Unlock()
|
||||||
|
entries := make([]string, 0, w.watches.len())
|
||||||
|
for pathname := range w.watches.path {
|
||||||
|
entries = append(entries, pathname)
|
||||||
|
}
|
||||||
|
return entries
|
||||||
|
}
|
||||||
|
|
||||||
|
// readEvents reads from the inotify file descriptor, converts the
|
||||||
|
// received events into Event objects and sends them via the Events channel
|
||||||
|
func (w *inotify) readEvents() {
|
||||||
|
defer func() {
|
||||||
|
close(w.doneResp)
|
||||||
|
close(w.Errors)
|
||||||
|
close(w.Events)
|
||||||
|
}()
|
||||||
|
|
||||||
|
var buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
|
||||||
|
for {
|
||||||
|
if w.isClosed() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
n, err := w.inotifyFile.Read(buf[:])
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, os.ErrClosed) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !w.sendError(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if n < unix.SizeofInotifyEvent {
|
||||||
|
err := errors.New("notify: short read in readEvents()") // Read was too short.
|
||||||
|
if n == 0 {
|
||||||
|
err = io.EOF // If EOF is received. This should really never happen.
|
||||||
|
}
|
||||||
|
if !w.sendError(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// We don't know how many events we just read into the buffer While the
|
||||||
|
// offset points to at least one whole event.
|
||||||
|
var offset uint32
|
||||||
|
for offset <= uint32(n-unix.SizeofInotifyEvent) {
|
||||||
|
// Point to the event in the buffer.
|
||||||
|
inEvent := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
|
||||||
|
|
||||||
|
if inEvent.Mask&unix.IN_Q_OVERFLOW != 0 {
|
||||||
|
if !w.sendError(ErrEventOverflow) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ev, ok := w.handleEvent(inEvent, &buf, offset)
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !w.sendEvent(ev) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move to the next event in the buffer
|
||||||
|
offset += unix.SizeofInotifyEvent + inEvent.Len
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *inotify) handleEvent(inEvent *unix.InotifyEvent, buf *[65536]byte, offset uint32) (Event, bool) {
|
||||||
|
w.mu.Lock()
|
||||||
|
defer w.mu.Unlock()
|
||||||
|
|
||||||
|
/// If the event happened to the watched directory or the watched file, the
|
||||||
|
/// kernel doesn't append the filename to the event, but we would like to
|
||||||
|
/// always fill the the "Name" field with a valid filename. We retrieve the
|
||||||
|
/// path of the watch from the "paths" map.
|
||||||
|
///
|
||||||
|
/// Can be nil if Remove() was called in another goroutine for this path
|
||||||
|
/// inbetween reading the events from the kernel and reading the internal
|
||||||
|
/// state. Not much we can do about it, so just skip. See #616.
|
||||||
|
watch := w.watches.byWd(uint32(inEvent.Wd))
|
||||||
|
if watch == nil {
|
||||||
|
return Event{}, true
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
name = watch.path
|
||||||
|
nameLen = uint32(inEvent.Len)
|
||||||
|
)
|
||||||
|
if nameLen > 0 {
|
||||||
|
/// Point "bytes" at the first byte of the filename
|
||||||
|
bb := *buf
|
||||||
|
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&bb[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
|
||||||
|
/// The filename is padded with NULL bytes. TrimRight() gets rid of those.
|
||||||
|
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\x00")
|
||||||
|
}
|
||||||
|
|
||||||
|
if debug {
|
||||||
|
internal.Debug(name, inEvent.Mask, inEvent.Cookie)
|
||||||
|
}
|
||||||
|
|
||||||
|
if inEvent.Mask&unix.IN_IGNORED != 0 || inEvent.Mask&unix.IN_UNMOUNT != 0 {
|
||||||
|
w.watches.remove(watch)
|
||||||
|
return Event{}, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// inotify will automatically remove the watch on deletes; just need
|
||||||
|
// to clean our state here.
|
||||||
|
if inEvent.Mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
|
||||||
|
w.watches.remove(watch)
|
||||||
|
}
|
||||||
|
|
||||||
|
// We can't really update the state when a watched path is moved; only
|
||||||
|
// IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove the watch.
|
||||||
|
if inEvent.Mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
|
||||||
|
if watch.recurse { // Do nothing
|
||||||
|
return Event{}, true
|
||||||
|
}
|
||||||
|
|
||||||
|
err := w.remove(watch.path)
|
||||||
|
if err != nil && !errors.Is(err, ErrNonExistentWatch) {
|
||||||
|
if !w.sendError(err) {
|
||||||
|
return Event{}, false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Skip if we're watching both this path and the parent; the parent will
|
||||||
|
/// already send a delete so no need to do it twice.
|
||||||
|
if inEvent.Mask&unix.IN_DELETE_SELF != 0 {
|
||||||
|
_, ok := w.watches.path[filepath.Dir(watch.path)]
|
||||||
|
if ok {
|
||||||
|
return Event{}, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ev := w.newEvent(name, inEvent.Mask, inEvent.Cookie)
|
||||||
|
// Need to update watch path for recurse.
|
||||||
|
if watch.recurse {
|
||||||
|
isDir := inEvent.Mask&unix.IN_ISDIR == unix.IN_ISDIR
|
||||||
|
/// New directory created: set up watch on it.
|
||||||
|
if isDir && ev.Has(Create) {
|
||||||
|
err := w.register(ev.Name, watch.flags, true)
|
||||||
|
if !w.sendError(err) {
|
||||||
|
return Event{}, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// This was a directory rename, so we need to update all the
|
||||||
|
// children.
|
||||||
|
//
|
||||||
|
// TODO: this is of course pretty slow; we should use a better data
|
||||||
|
// structure for storing all of this, e.g. store children in the
|
||||||
|
// watch. I have some code for this in my kqueue refactor we can use
|
||||||
|
// in the future. For now I'm okay with this as it's not publicly
|
||||||
|
// available. Correctness first, performance second.
|
||||||
|
if ev.renamedFrom != "" {
|
||||||
|
for k, ww := range w.watches.wd {
|
||||||
|
if k == watch.wd || ww.path == ev.Name {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if strings.HasPrefix(ww.path, ev.renamedFrom) {
|
||||||
|
ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1)
|
||||||
|
w.watches.wd[k] = ww
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ev, true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *inotify) isRecursive(path string) bool {
|
||||||
|
ww := w.watches.byPath(path)
|
||||||
|
if ww == nil { // path could be a file, so also check the Dir.
|
||||||
|
ww = w.watches.byPath(filepath.Dir(path))
|
||||||
|
}
|
||||||
|
return ww != nil && ww.recurse
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *inotify) newEvent(name string, mask, cookie uint32) Event {
|
||||||
|
e := Event{Name: name}
|
||||||
|
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
|
||||||
|
e.Op |= Create
|
||||||
|
}
|
||||||
|
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
|
||||||
|
e.Op |= Remove
|
||||||
|
}
|
||||||
|
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
|
||||||
|
e.Op |= Write
|
||||||
|
}
|
||||||
|
if mask&unix.IN_OPEN == unix.IN_OPEN {
|
||||||
|
e.Op |= xUnportableOpen
|
||||||
|
}
|
||||||
|
if mask&unix.IN_ACCESS == unix.IN_ACCESS {
|
||||||
|
e.Op |= xUnportableRead
|
||||||
|
}
|
||||||
|
if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE {
|
||||||
|
e.Op |= xUnportableCloseWrite
|
||||||
|
}
|
||||||
|
if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE {
|
||||||
|
e.Op |= xUnportableCloseRead
|
||||||
|
}
|
||||||
|
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
|
||||||
|
e.Op |= Rename
|
||||||
|
}
|
||||||
|
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
|
||||||
|
e.Op |= Chmod
|
||||||
|
}
|
||||||
|
|
||||||
|
if cookie != 0 {
|
||||||
|
if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
|
||||||
|
w.cookiesMu.Lock()
|
||||||
|
w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name}
|
||||||
|
w.cookieIndex++
|
||||||
|
if w.cookieIndex > 9 {
|
||||||
|
w.cookieIndex = 0
|
||||||
|
}
|
||||||
|
w.cookiesMu.Unlock()
|
||||||
|
} else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
|
||||||
|
w.cookiesMu.Lock()
|
||||||
|
var prev string
|
||||||
|
for _, c := range w.cookies {
|
||||||
|
if c.cookie == cookie {
|
||||||
|
prev = c.path
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
w.cookiesMu.Unlock()
|
||||||
|
e.renamedFrom = prev
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *inotify) xSupports(op Op) bool {
|
||||||
|
return true // Supports everything.
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *inotify) state() {
|
||||||
|
w.mu.Lock()
|
||||||
|
defer w.mu.Unlock()
|
||||||
|
for wd, ww := range w.watches.wd {
|
||||||
|
fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path)
|
||||||
|
}
|
||||||
|
}
|
||||||
705
vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
generated
vendored
Normal file
705
vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
generated
vendored
Normal file
@@ -0,0 +1,705 @@
|
|||||||
|
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/fsnotify/fsnotify/internal"
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
type kqueue struct {
|
||||||
|
*shared
|
||||||
|
Events chan Event
|
||||||
|
Errors chan error
|
||||||
|
|
||||||
|
kq int // File descriptor (as returned by the kqueue() syscall).
|
||||||
|
closepipe [2]int // Pipe used for closing kq.
|
||||||
|
watches *watches
|
||||||
|
}
|
||||||
|
|
||||||
|
type (
|
||||||
|
watches struct {
|
||||||
|
mu sync.RWMutex
|
||||||
|
wd map[int]watch // wd → watch
|
||||||
|
path map[string]int // pathname → wd
|
||||||
|
byDir map[string]map[int]struct{} // dirname(path) → wd
|
||||||
|
seen map[string]struct{} // Keep track of if we know this file exists.
|
||||||
|
byUser map[string]struct{} // Watches added with Watcher.Add()
|
||||||
|
}
|
||||||
|
watch struct {
|
||||||
|
wd int
|
||||||
|
name string
|
||||||
|
linkName string // In case of links; name is the target, and this is the link.
|
||||||
|
isDir bool
|
||||||
|
dirFlags uint32
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
func newWatches() *watches {
|
||||||
|
return &watches{
|
||||||
|
wd: make(map[int]watch),
|
||||||
|
path: make(map[string]int),
|
||||||
|
byDir: make(map[string]map[int]struct{}),
|
||||||
|
seen: make(map[string]struct{}),
|
||||||
|
byUser: make(map[string]struct{}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *watches) listPaths(userOnly bool) []string {
|
||||||
|
w.mu.RLock()
|
||||||
|
defer w.mu.RUnlock()
|
||||||
|
|
||||||
|
if userOnly {
|
||||||
|
l := make([]string, 0, len(w.byUser))
|
||||||
|
for p := range w.byUser {
|
||||||
|
l = append(l, p)
|
||||||
|
}
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
l := make([]string, 0, len(w.path))
|
||||||
|
for p := range w.path {
|
||||||
|
l = append(l, p)
|
||||||
|
}
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *watches) watchesInDir(path string) []string {
|
||||||
|
w.mu.RLock()
|
||||||
|
defer w.mu.RUnlock()
|
||||||
|
|
||||||
|
l := make([]string, 0, 4)
|
||||||
|
for fd := range w.byDir[path] {
|
||||||
|
info := w.wd[fd]
|
||||||
|
if _, ok := w.byUser[info.name]; !ok {
|
||||||
|
l = append(l, info.name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mark path as added by the user.
|
||||||
|
func (w *watches) addUserWatch(path string) {
|
||||||
|
w.mu.Lock()
|
||||||
|
defer w.mu.Unlock()
|
||||||
|
w.byUser[path] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *watches) addLink(path string, fd int) {
|
||||||
|
w.mu.Lock()
|
||||||
|
defer w.mu.Unlock()
|
||||||
|
|
||||||
|
w.path[path] = fd
|
||||||
|
w.seen[path] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *watches) add(path, linkPath string, fd int, isDir bool) {
|
||||||
|
w.mu.Lock()
|
||||||
|
defer w.mu.Unlock()
|
||||||
|
|
||||||
|
w.path[path] = fd
|
||||||
|
w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir}
|
||||||
|
|
||||||
|
parent := filepath.Dir(path)
|
||||||
|
byDir, ok := w.byDir[parent]
|
||||||
|
if !ok {
|
||||||
|
byDir = make(map[int]struct{}, 1)
|
||||||
|
w.byDir[parent] = byDir
|
||||||
|
}
|
||||||
|
byDir[fd] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *watches) byWd(fd int) (watch, bool) {
|
||||||
|
w.mu.RLock()
|
||||||
|
defer w.mu.RUnlock()
|
||||||
|
info, ok := w.wd[fd]
|
||||||
|
return info, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *watches) byPath(path string) (watch, bool) {
|
||||||
|
w.mu.RLock()
|
||||||
|
defer w.mu.RUnlock()
|
||||||
|
info, ok := w.wd[w.path[path]]
|
||||||
|
return info, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *watches) updateDirFlags(path string, flags uint32) bool {
|
||||||
|
w.mu.Lock()
|
||||||
|
defer w.mu.Unlock()
|
||||||
|
|
||||||
|
fd, ok := w.path[path]
|
||||||
|
if !ok { // Already deleted: don't re-set it here.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
info := w.wd[fd]
|
||||||
|
info.dirFlags = flags
|
||||||
|
w.wd[fd] = info
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *watches) remove(fd int, path string) bool {
|
||||||
|
w.mu.Lock()
|
||||||
|
defer w.mu.Unlock()
|
||||||
|
|
||||||
|
isDir := w.wd[fd].isDir
|
||||||
|
delete(w.path, path)
|
||||||
|
delete(w.byUser, path)
|
||||||
|
|
||||||
|
parent := filepath.Dir(path)
|
||||||
|
delete(w.byDir[parent], fd)
|
||||||
|
|
||||||
|
if len(w.byDir[parent]) == 0 {
|
||||||
|
delete(w.byDir, parent)
|
||||||
|
}
|
||||||
|
|
||||||
|
delete(w.wd, fd)
|
||||||
|
delete(w.seen, path)
|
||||||
|
return isDir
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *watches) markSeen(path string, exists bool) {
|
||||||
|
w.mu.Lock()
|
||||||
|
defer w.mu.Unlock()
|
||||||
|
if exists {
|
||||||
|
w.seen[path] = struct{}{}
|
||||||
|
} else {
|
||||||
|
delete(w.seen, path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *watches) seenBefore(path string) bool {
|
||||||
|
w.mu.RLock()
|
||||||
|
defer w.mu.RUnlock()
|
||||||
|
_, ok := w.seen[path]
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultBufferSize = 0
|
||||||
|
|
||||||
|
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||||
|
kq, closepipe, err := newKqueue()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
w := &kqueue{
|
||||||
|
shared: newShared(ev, errs),
|
||||||
|
Events: ev,
|
||||||
|
Errors: errs,
|
||||||
|
kq: kq,
|
||||||
|
closepipe: closepipe,
|
||||||
|
watches: newWatches(),
|
||||||
|
}
|
||||||
|
|
||||||
|
go w.readEvents()
|
||||||
|
return w, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// newKqueue creates a new kernel event queue and returns a descriptor.
|
||||||
|
//
|
||||||
|
// This registers a new event on closepipe, which will trigger an event when
|
||||||
|
// it's closed. This way we can use kevent() without timeout/polling; without
|
||||||
|
// the closepipe, it would block forever and we wouldn't be able to stop it at
|
||||||
|
// all.
|
||||||
|
func newKqueue() (kq int, closepipe [2]int, err error) {
|
||||||
|
kq, err = unix.Kqueue()
|
||||||
|
if err != nil {
|
||||||
|
return kq, closepipe, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Register the close pipe.
|
||||||
|
err = unix.Pipe(closepipe[:])
|
||||||
|
if err != nil {
|
||||||
|
unix.Close(kq)
|
||||||
|
return kq, closepipe, err
|
||||||
|
}
|
||||||
|
unix.CloseOnExec(closepipe[0])
|
||||||
|
unix.CloseOnExec(closepipe[1])
|
||||||
|
|
||||||
|
// Register changes to listen on the closepipe.
|
||||||
|
changes := make([]unix.Kevent_t, 1)
|
||||||
|
// SetKevent converts int to the platform-specific types.
|
||||||
|
unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ,
|
||||||
|
unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT)
|
||||||
|
|
||||||
|
ok, err := unix.Kevent(kq, changes, nil, nil)
|
||||||
|
if ok == -1 {
|
||||||
|
unix.Close(kq)
|
||||||
|
unix.Close(closepipe[0])
|
||||||
|
unix.Close(closepipe[1])
|
||||||
|
return kq, closepipe, err
|
||||||
|
}
|
||||||
|
return kq, closepipe, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *kqueue) Close() error {
|
||||||
|
if w.shared.close() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
pathsToRemove := w.watches.listPaths(false)
|
||||||
|
for _, name := range pathsToRemove {
|
||||||
|
w.Remove(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
unix.Close(w.closepipe[1]) // Send "quit" message to readEvents
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *kqueue) Add(name string) error { return w.AddWith(name) }
|
||||||
|
|
||||||
|
func (w *kqueue) AddWith(name string, opts ...addOpt) error {
|
||||||
|
if debug {
|
||||||
|
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
|
||||||
|
time.Now().Format("15:04:05.000000000"), name)
|
||||||
|
}
|
||||||
|
|
||||||
|
with := getOptions(opts...)
|
||||||
|
if !w.xSupports(with.op) {
|
||||||
|
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := w.addWatch(name, noteAllEvents, false)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
w.watches.addUserWatch(name)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *kqueue) Remove(name string) error {
|
||||||
|
if debug {
|
||||||
|
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
|
||||||
|
time.Now().Format("15:04:05.000000000"), name)
|
||||||
|
}
|
||||||
|
return w.remove(name, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *kqueue) remove(name string, unwatchFiles bool) error {
|
||||||
|
if w.isClosed() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
name = filepath.Clean(name)
|
||||||
|
info, ok := w.watches.byPath(name)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
err := w.register([]int{info.wd}, unix.EV_DELETE, 0)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
unix.Close(info.wd)
|
||||||
|
|
||||||
|
isDir := w.watches.remove(info.wd, name)
|
||||||
|
|
||||||
|
// Find all watched paths that are in this directory that are not external.
|
||||||
|
if unwatchFiles && isDir {
|
||||||
|
pathsToRemove := w.watches.watchesInDir(name)
|
||||||
|
for _, name := range pathsToRemove {
|
||||||
|
// Since these are internal, not much sense in propagating error to
|
||||||
|
// the user, as that will just confuse them with an error about a
|
||||||
|
// path they did not explicitly watch themselves.
|
||||||
|
w.Remove(name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *kqueue) WatchList() []string {
|
||||||
|
if w.isClosed() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return w.watches.listPaths(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
|
||||||
|
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
|
||||||
|
|
||||||
|
// addWatch adds name to the watched file set; the flags are interpreted as
|
||||||
|
// described in kevent(2).
|
||||||
|
//
|
||||||
|
// Returns the real path to the file which was added, with symlinks resolved.
|
||||||
|
func (w *kqueue) addWatch(name string, flags uint32, listDir bool) (string, error) {
|
||||||
|
if w.isClosed() {
|
||||||
|
return "", ErrClosed
|
||||||
|
}
|
||||||
|
|
||||||
|
name = filepath.Clean(name)
|
||||||
|
|
||||||
|
info, alreadyWatching := w.watches.byPath(name)
|
||||||
|
if !alreadyWatching {
|
||||||
|
fi, err := os.Lstat(name)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Don't watch sockets or named pipes.
|
||||||
|
if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Follow symlinks, but only for paths added with Add(), and not paths
|
||||||
|
// we're adding from internalWatch from a listdir.
|
||||||
|
if !listDir && fi.Mode()&os.ModeSymlink == os.ModeSymlink {
|
||||||
|
link, err := os.Readlink(name)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if !filepath.IsAbs(link) {
|
||||||
|
link = filepath.Join(filepath.Dir(name), link)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, alreadyWatching = w.watches.byPath(link)
|
||||||
|
if alreadyWatching {
|
||||||
|
// Add to watches so we don't get spurious Create events later
|
||||||
|
// on when we diff the directories.
|
||||||
|
w.watches.addLink(name, 0)
|
||||||
|
return link, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
info.linkName = name
|
||||||
|
name = link
|
||||||
|
fi, err = os.Lstat(name)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retry on EINTR; open() can return EINTR in practice on macOS.
|
||||||
|
// See #354, and Go issues 11180 and 39237.
|
||||||
|
for {
|
||||||
|
info.wd, err = unix.Open(name, openMode, 0)
|
||||||
|
if err == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if errors.Is(err, unix.EINTR) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
info.isDir = fi.IsDir()
|
||||||
|
}
|
||||||
|
|
||||||
|
err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags)
|
||||||
|
if err != nil {
|
||||||
|
unix.Close(info.wd)
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !alreadyWatching {
|
||||||
|
w.watches.add(name, info.linkName, info.wd, info.isDir)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Watch the directory if it has not been watched before, or if it was
|
||||||
|
// watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
|
||||||
|
if info.isDir {
|
||||||
|
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
|
||||||
|
(!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE)
|
||||||
|
if !w.watches.updateDirFlags(name, flags) {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if watchDir {
|
||||||
|
d := name
|
||||||
|
if info.linkName != "" {
|
||||||
|
d = info.linkName
|
||||||
|
}
|
||||||
|
if err := w.watchDirectoryFiles(d); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return name, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// readEvents reads from kqueue and converts the received kevents into
|
||||||
|
// Event values that it sends down the Events channel.
|
||||||
|
func (w *kqueue) readEvents() {
|
||||||
|
defer func() {
|
||||||
|
close(w.Events)
|
||||||
|
close(w.Errors)
|
||||||
|
_ = unix.Close(w.kq)
|
||||||
|
unix.Close(w.closepipe[0])
|
||||||
|
}()
|
||||||
|
|
||||||
|
eventBuffer := make([]unix.Kevent_t, 10)
|
||||||
|
for {
|
||||||
|
kevents, err := w.read(eventBuffer)
|
||||||
|
// EINTR is okay, the syscall was interrupted before timeout expired.
|
||||||
|
if err != nil && err != unix.EINTR {
|
||||||
|
if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, kevent := range kevents {
|
||||||
|
var (
|
||||||
|
wd = int(kevent.Ident)
|
||||||
|
mask = uint32(kevent.Fflags)
|
||||||
|
)
|
||||||
|
|
||||||
|
// Shut down the loop when the pipe is closed, but only after all
|
||||||
|
// other events have been processed.
|
||||||
|
if wd == w.closepipe[0] {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
path, ok := w.watches.byWd(wd)
|
||||||
|
if debug {
|
||||||
|
internal.Debug(path.name, &kevent)
|
||||||
|
}
|
||||||
|
|
||||||
|
// On macOS it seems that sometimes an event with Ident=0 is
|
||||||
|
// delivered, and no other flags/information beyond that, even
|
||||||
|
// though we never saw such a file descriptor. For example in
|
||||||
|
// TestWatchSymlink/277 (usually at the end, but sometimes sooner):
|
||||||
|
//
|
||||||
|
// fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent)
|
||||||
|
// unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)}
|
||||||
|
// unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)}
|
||||||
|
//
|
||||||
|
// The first is a normal event, the second with Ident 0. No error
|
||||||
|
// flag, no data, no ... nothing.
|
||||||
|
//
|
||||||
|
// I read a bit through bsd/kern_event.c from the xnu source, but I
|
||||||
|
// don't really see an obvious location where this is triggered –
|
||||||
|
// this doesn't seem intentional, but idk...
|
||||||
|
//
|
||||||
|
// Technically fd 0 is a valid descriptor, so only skip it if
|
||||||
|
// there's no path, and if we're on macOS.
|
||||||
|
if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
event := w.newEvent(path.name, path.linkName, mask)
|
||||||
|
|
||||||
|
if event.Has(Rename) || event.Has(Remove) {
|
||||||
|
w.remove(event.Name, false)
|
||||||
|
w.watches.markSeen(event.Name, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
if path.isDir && event.Has(Write) && !event.Has(Remove) {
|
||||||
|
w.dirChange(event.Name)
|
||||||
|
} else if !w.sendEvent(event) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if event.Has(Remove) {
|
||||||
|
// Look for a file that may have overwritten this; for example,
|
||||||
|
// mv f1 f2 will delete f2, then create f2.
|
||||||
|
if path.isDir {
|
||||||
|
fileDir := filepath.Clean(event.Name)
|
||||||
|
_, found := w.watches.byPath(fileDir)
|
||||||
|
if found {
|
||||||
|
// TODO: this branch is never triggered in any test.
|
||||||
|
// Added in d6220df (2012).
|
||||||
|
// isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111
|
||||||
|
//
|
||||||
|
// I don't really get how this can be triggered either.
|
||||||
|
// And it wasn't triggered in the patch that added it,
|
||||||
|
// either.
|
||||||
|
//
|
||||||
|
// Original also had a comment:
|
||||||
|
// make sure the directory exists before we watch for
|
||||||
|
// changes. When we do a recursive watch and perform
|
||||||
|
// rm -rf, the parent directory might have gone
|
||||||
|
// missing, ignore the missing directory and let the
|
||||||
|
// upcoming delete event remove the watch from the
|
||||||
|
// parent directory.
|
||||||
|
err := w.dirChange(fileDir)
|
||||||
|
if !w.sendError(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
path := filepath.Clean(event.Name)
|
||||||
|
if fi, err := os.Lstat(path); err == nil {
|
||||||
|
err := w.sendCreateIfNew(path, fi)
|
||||||
|
if !w.sendError(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// newEvent returns an platform-independent Event based on kqueue Fflags.
|
||||||
|
func (w *kqueue) newEvent(name, linkName string, mask uint32) Event {
|
||||||
|
e := Event{Name: name}
|
||||||
|
if linkName != "" {
|
||||||
|
// If the user watched "/path/link" then emit events as "/path/link"
|
||||||
|
// rather than "/path/target".
|
||||||
|
e.Name = linkName
|
||||||
|
}
|
||||||
|
|
||||||
|
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
|
||||||
|
e.Op |= Remove
|
||||||
|
}
|
||||||
|
if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
|
||||||
|
e.Op |= Write
|
||||||
|
}
|
||||||
|
if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
|
||||||
|
e.Op |= Rename
|
||||||
|
}
|
||||||
|
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
|
||||||
|
e.Op |= Chmod
|
||||||
|
}
|
||||||
|
// No point sending a write and delete event at the same time: if it's gone,
|
||||||
|
// then it's gone.
|
||||||
|
if e.Op.Has(Write) && e.Op.Has(Remove) {
|
||||||
|
e.Op &^= Write
|
||||||
|
}
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
|
||||||
|
func (w *kqueue) watchDirectoryFiles(dirPath string) error {
|
||||||
|
files, err := os.ReadDir(dirPath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, f := range files {
|
||||||
|
path := filepath.Join(dirPath, f.Name())
|
||||||
|
|
||||||
|
fi, err := f.Info()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("%q: %w", path, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cleanPath, err := w.internalWatch(path, fi)
|
||||||
|
if err != nil {
|
||||||
|
// No permission to read the file; that's not a problem: just skip.
|
||||||
|
// But do add it to w.fileExists to prevent it from being picked up
|
||||||
|
// as a "new" file later (it still shows up in the directory
|
||||||
|
// listing).
|
||||||
|
switch {
|
||||||
|
case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM):
|
||||||
|
cleanPath = filepath.Clean(path)
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("%q: %w", path, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
w.watches.markSeen(cleanPath, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Search the directory for new files and send an event for them.
|
||||||
|
//
|
||||||
|
// This functionality is to have the BSD watcher match the inotify, which sends
|
||||||
|
// a create event for files created in a watched directory.
|
||||||
|
func (w *kqueue) dirChange(dir string) error {
|
||||||
|
files, err := os.ReadDir(dir)
|
||||||
|
if err != nil {
|
||||||
|
// Directory no longer exists: we can ignore this safely. kqueue will
|
||||||
|
// still give us the correct events.
|
||||||
|
if errors.Is(err, os.ErrNotExist) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return fmt.Errorf("fsnotify.dirChange %q: %w", dir, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, f := range files {
|
||||||
|
fi, err := f.Info()
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, os.ErrNotExist) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return fmt.Errorf("fsnotify.dirChange: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi)
|
||||||
|
if err != nil {
|
||||||
|
// Don't need to send an error if this file isn't readable.
|
||||||
|
if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) || errors.Is(err, os.ErrNotExist) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return fmt.Errorf("fsnotify.dirChange: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send a create event if the file isn't already being tracked, and start
|
||||||
|
// watching this file.
|
||||||
|
func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error {
|
||||||
|
if !w.watches.seenBefore(path) {
|
||||||
|
if !w.sendEvent(Event{Name: path, Op: Create}) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Like watchDirectoryFiles, but without doing another ReadDir.
|
||||||
|
path, err := w.internalWatch(path, fi)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
w.watches.markSeen(path, true)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) {
|
||||||
|
if fi.IsDir() {
|
||||||
|
// mimic Linux providing delete events for subdirectories, but preserve
|
||||||
|
// the flags used if currently watching subdirectory
|
||||||
|
info, _ := w.watches.byPath(name)
|
||||||
|
return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Watch file to mimic Linux inotify.
|
||||||
|
return w.addWatch(name, noteAllEvents, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Register events with the queue.
|
||||||
|
func (w *kqueue) register(fds []int, flags int, fflags uint32) error {
|
||||||
|
changes := make([]unix.Kevent_t, len(fds))
|
||||||
|
for i, fd := range fds {
|
||||||
|
// SetKevent converts int to the platform-specific types.
|
||||||
|
unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
|
||||||
|
changes[i].Fflags = fflags
|
||||||
|
}
|
||||||
|
|
||||||
|
// Register the events.
|
||||||
|
success, err := unix.Kevent(w.kq, changes, nil, nil)
|
||||||
|
if success == -1 {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// read retrieves pending events, or waits until an event occurs.
|
||||||
|
func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) {
|
||||||
|
n, err := unix.Kevent(w.kq, nil, events, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return events[0:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *kqueue) xSupports(op Op) bool {
|
||||||
|
//if runtime.GOOS == "freebsd" {
|
||||||
|
// return true // Supports everything.
|
||||||
|
//}
|
||||||
|
if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
|
||||||
|
op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
22
vendor/github.com/fsnotify/fsnotify/backend_other.go
generated
vendored
Normal file
22
vendor/github.com/fsnotify/fsnotify/backend_other.go
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows)
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import "errors"
|
||||||
|
|
||||||
|
type other struct {
|
||||||
|
Events chan Event
|
||||||
|
Errors chan error
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultBufferSize = 0
|
||||||
|
|
||||||
|
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||||
|
return nil, errors.New("fsnotify not supported on the current platform")
|
||||||
|
}
|
||||||
|
func (w *other) Close() error { return nil }
|
||||||
|
func (w *other) WatchList() []string { return nil }
|
||||||
|
func (w *other) Add(name string) error { return nil }
|
||||||
|
func (w *other) AddWith(name string, opts ...addOpt) error { return nil }
|
||||||
|
func (w *other) Remove(name string) error { return nil }
|
||||||
|
func (w *other) xSupports(op Op) bool { return false }
|
||||||
680
vendor/github.com/fsnotify/fsnotify/backend_windows.go
generated
vendored
Normal file
680
vendor/github.com/fsnotify/fsnotify/backend_windows.go
generated
vendored
Normal file
@@ -0,0 +1,680 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
|
// Windows backend based on ReadDirectoryChangesW()
|
||||||
|
//
|
||||||
|
// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"reflect"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"github.com/fsnotify/fsnotify/internal"
|
||||||
|
"golang.org/x/sys/windows"
|
||||||
|
)
|
||||||
|
|
||||||
|
type readDirChangesW struct {
|
||||||
|
Events chan Event
|
||||||
|
Errors chan error
|
||||||
|
|
||||||
|
port windows.Handle // Handle to completion port
|
||||||
|
input chan *input // Inputs to the reader are sent on this channel
|
||||||
|
done chan chan<- error
|
||||||
|
|
||||||
|
mu sync.Mutex // Protects access to watches, closed
|
||||||
|
watches watchMap // Map of watches (key: i-number)
|
||||||
|
closed bool // Set to true when Close() is first called
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultBufferSize = 50
|
||||||
|
|
||||||
|
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||||
|
port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, os.NewSyscallError("CreateIoCompletionPort", err)
|
||||||
|
}
|
||||||
|
w := &readDirChangesW{
|
||||||
|
Events: ev,
|
||||||
|
Errors: errs,
|
||||||
|
port: port,
|
||||||
|
watches: make(watchMap),
|
||||||
|
input: make(chan *input, 1),
|
||||||
|
done: make(chan chan<- error, 1),
|
||||||
|
}
|
||||||
|
go w.readEvents()
|
||||||
|
return w, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *readDirChangesW) isClosed() bool {
|
||||||
|
w.mu.Lock()
|
||||||
|
defer w.mu.Unlock()
|
||||||
|
return w.closed
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool {
|
||||||
|
if mask == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
event := w.newEvent(name, uint32(mask))
|
||||||
|
event.renamedFrom = renamedFrom
|
||||||
|
select {
|
||||||
|
case ch := <-w.done:
|
||||||
|
w.done <- ch
|
||||||
|
case w.Events <- event:
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns true if the error was sent, or false if watcher is closed.
|
||||||
|
func (w *readDirChangesW) sendError(err error) bool {
|
||||||
|
if err == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-w.done:
|
||||||
|
return false
|
||||||
|
case w.Errors <- err:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close shuts the watcher down and waits for the reader goroutine to finish.
// Calling it more than once is a no-op returning nil.
func (w *readDirChangesW) Close() error {
	if w.isClosed() {
		return nil
	}

	// Mark closed first so Add/AddWith/Remove start refusing work.
	w.mu.Lock()
	w.closed = true
	w.mu.Unlock()

	// Send "done" message to the reader goroutine
	ch := make(chan error)
	w.done <- ch
	if err := w.wakeupReader(); err != nil {
		return err
	}
	// The reader replies with the result of closing the completion port.
	return <-ch
}
|
||||||
|
|
||||||
|
// Add starts watching name with the default options.
func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) }
|
||||||
|
|
||||||
|
// AddWith starts watching name, applying any addOpt options (e.g. buffer
// size). The actual watch setup runs on the I/O thread; this method just
// validates the request, queues it, and waits for the reply.
func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error {
	if w.isClosed() {
		return ErrClosed
	}
	if debug {
		fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
			time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name))
	}

	with := getOptions(opts...)
	// Reject event kinds this backend cannot deliver (open/read/close-*).
	if !w.xSupports(with.op) {
		return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
	}
	if with.bufsize < 4096 {
		return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes")
	}

	// Hand the request to the I/O thread and wait for its answer.
	in := &input{
		op:      opAddWatch,
		path:    filepath.Clean(name),
		flags:   sysFSALLEVENTS,
		reply:   make(chan error),
		bufsize: with.bufsize,
	}
	w.input <- in
	if err := w.wakeupReader(); err != nil {
		return err
	}
	return <-in.reply
}
|
||||||
|
|
||||||
|
// Remove stops watching name. Like AddWith, the real work is queued to the
// I/O thread; on a closed watcher it is a no-op.
func (w *readDirChangesW) Remove(name string) error {
	if w.isClosed() {
		return nil
	}
	if debug {
		fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
			time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name))
	}

	in := &input{
		op:    opRemoveWatch,
		path:  filepath.Clean(name),
		reply: make(chan error),
	}
	w.input <- in
	if err := w.wakeupReader(); err != nil {
		return err
	}
	return <-in.reply
}
|
||||||
|
|
||||||
|
func (w *readDirChangesW) WatchList() []string {
|
||||||
|
if w.isClosed() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
defer w.mu.Unlock()
|
||||||
|
|
||||||
|
entries := make([]string, 0, len(w.watches))
|
||||||
|
for _, entry := range w.watches {
|
||||||
|
for _, watchEntry := range entry {
|
||||||
|
for name := range watchEntry.names {
|
||||||
|
entries = append(entries, filepath.Join(watchEntry.path, name))
|
||||||
|
}
|
||||||
|
// the directory itself is being watched
|
||||||
|
if watchEntry.mask != 0 {
|
||||||
|
entries = append(entries, watchEntry.path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return entries
|
||||||
|
}
|
||||||
|
|
||||||
|
// These options are from the old golang.org/x/exp/winfsnotify, where you could
|
||||||
|
// add various options to the watch. This has long since been removed.
|
||||||
|
//
|
||||||
|
// The "sys" in the name is misleading as they're not part of any "system".
|
||||||
|
//
|
||||||
|
// This should all be removed at some point, and just use windows.FILE_NOTIFY_*
|
||||||
|
const (
|
||||||
|
sysFSALLEVENTS = 0xfff
|
||||||
|
sysFSCREATE = 0x100
|
||||||
|
sysFSDELETE = 0x200
|
||||||
|
sysFSDELETESELF = 0x400
|
||||||
|
sysFSMODIFY = 0x2
|
||||||
|
sysFSMOVE = 0xc0
|
||||||
|
sysFSMOVEDFROM = 0x40
|
||||||
|
sysFSMOVEDTO = 0x80
|
||||||
|
sysFSMOVESELF = 0x800
|
||||||
|
sysFSIGNORED = 0x8000
|
||||||
|
)
|
||||||
|
|
||||||
|
func (w *readDirChangesW) newEvent(name string, mask uint32) Event {
|
||||||
|
e := Event{Name: name}
|
||||||
|
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
|
||||||
|
e.Op |= Create
|
||||||
|
}
|
||||||
|
if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
|
||||||
|
e.Op |= Remove
|
||||||
|
}
|
||||||
|
if mask&sysFSMODIFY == sysFSMODIFY {
|
||||||
|
e.Op |= Write
|
||||||
|
}
|
||||||
|
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
|
||||||
|
e.Op |= Rename
|
||||||
|
}
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// Commands sent to the reader goroutine via the input channel.
const (
	opAddWatch = iota
	opRemoveWatch
)

// provisional marks a watch that has been registered but whose first
// ReadDirectoryChanges call hasn't completed yet. It occupies bit 32, above
// the sysFS* flags, so it never collides with a real event flag.
const (
	provisional uint64 = 1 << (32 + iota)
)
|
||||||
|
|
||||||
|
// input is a command (add or remove a watch) sent to the reader goroutine;
// the result comes back on reply.
type input struct {
	op      int    // opAddWatch or opRemoveWatch
	path    string // cleaned path to (un)watch
	flags   uint32 // sysFS* event mask (add only)
	bufsize int    // ReadDirectoryChangesW buffer size (add only)
	reply   chan error
}

// inode identifies a directory by volume serial number and file index, along
// with an open handle used for ReadDirectoryChangesW.
type inode struct {
	handle windows.Handle
	volume uint32
	index  uint64
}

// watch is the per-directory watch state, owned by the I/O thread.
//
// ov must stay the first field: readEvents recovers the *watch by casting the
// *Overlapped returned from GetQueuedCompletionStatus.
type watch struct {
	ov      windows.Overlapped
	ino     *inode            // i-number
	recurse bool              // Recursive watch?
	path    string            // Directory path
	mask    uint64            // Directory itself is being watched with these notify flags
	names   map[string]uint64 // Map of names being watched and their notify flags
	rename  string            // Remembers the old name while renaming a file
	buf     []byte            // buffer, allocated later
}

type (
	indexMap map[uint64]*watch   // keyed by file index
	watchMap map[uint32]indexMap // keyed by volume serial number
)
|
||||||
|
|
||||||
|
func (w *readDirChangesW) wakeupReader() error {
|
||||||
|
err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil)
|
||||||
|
if err != nil {
|
||||||
|
return os.NewSyscallError("PostQueuedCompletionStatus", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *readDirChangesW) getDir(pathname string) (dir string, err error) {
|
||||||
|
attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname))
|
||||||
|
if err != nil {
|
||||||
|
return "", os.NewSyscallError("GetFileAttributes", err)
|
||||||
|
}
|
||||||
|
if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 {
|
||||||
|
dir = pathname
|
||||||
|
} else {
|
||||||
|
dir, _ = filepath.Split(pathname)
|
||||||
|
dir = filepath.Clean(dir)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// getIno opens path as a directory handle (overlapped, shared access) and
// returns its identity (volume serial + 64-bit file index) together with the
// open handle. The caller owns the handle and must close it.
func (w *readDirChangesW) getIno(path string) (ino *inode, err error) {
	h, err := windows.CreateFile(windows.StringToUTF16Ptr(path),
		windows.FILE_LIST_DIRECTORY,
		windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE,
		nil, windows.OPEN_EXISTING,
		// BACKUP_SEMANTICS is required to open a directory; OVERLAPPED for
		// async ReadDirectoryChanges via the completion port.
		windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0)
	if err != nil {
		return nil, os.NewSyscallError("CreateFile", err)
	}

	var fi windows.ByHandleFileInformation
	err = windows.GetFileInformationByHandle(h, &fi)
	if err != nil {
		windows.CloseHandle(h)
		return nil, os.NewSyscallError("GetFileInformationByHandle", err)
	}
	ino = &inode{
		handle: h,
		volume: fi.VolumeSerialNumber,
		index:  uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
	}
	return ino, nil
}
|
||||||
|
|
||||||
|
// Must run within the I/O thread.
|
||||||
|
func (m watchMap) get(ino *inode) *watch {
|
||||||
|
if i := m[ino.volume]; i != nil {
|
||||||
|
return i[ino.index]
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must run within the I/O thread.
|
||||||
|
func (m watchMap) set(ino *inode, watch *watch) {
|
||||||
|
i := m[ino.volume]
|
||||||
|
if i == nil {
|
||||||
|
i = make(indexMap)
|
||||||
|
m[ino.volume] = i
|
||||||
|
}
|
||||||
|
i[ino.index] = watch
|
||||||
|
}
|
||||||
|
|
||||||
|
// addWatch registers (or extends) a watch on pathname with the given sysFS*
// flags. A trailing \... suffix (stripped by recursivePath) requests a
// recursive watch.
//
// Must run within the I/O thread.
func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error {
	pathname, recurse := recursivePath(pathname)

	dir, err := w.getDir(pathname)
	if err != nil {
		return err
	}

	ino, err := w.getIno(dir)
	if err != nil {
		return err
	}
	w.mu.Lock()
	watchEntry := w.watches.get(ino)
	w.mu.Unlock()
	if watchEntry == nil {
		// New directory: associate its handle with our completion port and
		// create the watch state.
		_, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0)
		if err != nil {
			windows.CloseHandle(ino.handle)
			return os.NewSyscallError("CreateIoCompletionPort", err)
		}
		watchEntry = &watch{
			ino:     ino,
			path:    dir,
			names:   make(map[string]uint64),
			recurse: recurse,
			buf:     make([]byte, bufsize),
		}
		w.mu.Lock()
		w.watches.set(ino, watchEntry)
		w.mu.Unlock()
		// Mark provisional until the first ReadDirectoryChanges succeeds.
		flags |= provisional
	} else {
		// Already watching this directory; drop the duplicate handle.
		windows.CloseHandle(ino.handle)
	}
	// Watching the directory itself vs. a single name inside it.
	if pathname == dir {
		watchEntry.mask |= flags
	} else {
		watchEntry.names[filepath.Base(pathname)] |= flags
	}

	err = w.startRead(watchEntry)
	if err != nil {
		return err
	}

	// Read is armed; clear the provisional bit.
	if pathname == dir {
		watchEntry.mask &= ^provisional
	} else {
		watchEntry.names[filepath.Base(pathname)] &= ^provisional
	}
	return nil
}
|
||||||
|
|
||||||
|
// Must run within the I/O thread.
|
||||||
|
func (w *readDirChangesW) remWatch(pathname string) error {
|
||||||
|
pathname, recurse := recursivePath(pathname)
|
||||||
|
|
||||||
|
dir, err := w.getDir(pathname)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
ino, err := w.getIno(dir)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
watch := w.watches.get(ino)
|
||||||
|
w.mu.Unlock()
|
||||||
|
|
||||||
|
if recurse && !watch.recurse {
|
||||||
|
return fmt.Errorf("can't use \\... with non-recursive watch %q", pathname)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = windows.CloseHandle(ino.handle)
|
||||||
|
if err != nil {
|
||||||
|
w.sendError(os.NewSyscallError("CloseHandle", err))
|
||||||
|
}
|
||||||
|
if watch == nil {
|
||||||
|
return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname)
|
||||||
|
}
|
||||||
|
if pathname == dir {
|
||||||
|
w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
|
||||||
|
watch.mask = 0
|
||||||
|
} else {
|
||||||
|
name := filepath.Base(pathname)
|
||||||
|
w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED)
|
||||||
|
delete(watch.names, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.startRead(watch)
|
||||||
|
}
|
||||||
|
|
||||||
|
// deleteWatch clears all flags on the watch, sending sysFSIGNORED for every
// non-provisional entry so consumers see the watches go away. Provisional
// entries (never successfully armed) are removed silently.
//
// Must run within the I/O thread.
func (w *readDirChangesW) deleteWatch(watch *watch) {
	for name, mask := range watch.names {
		if mask&provisional == 0 {
			w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED)
		}
		delete(watch.names, name)
	}
	if watch.mask != 0 {
		if watch.mask&provisional == 0 {
			w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
		}
		watch.mask = 0
	}
}
|
||||||
|
|
||||||
|
// startRead (re)arms the overlapped ReadDirectoryChanges call for a watch.
// If the watch no longer has any flags set, it is torn down instead: the
// handle is closed and the entry removed from w.watches.
//
// Must run within the I/O thread.
func (w *readDirChangesW) startRead(watch *watch) error {
	// Cancel any in-flight read before re-arming with a (possibly) new mask.
	err := windows.CancelIo(watch.ino.handle)
	if err != nil {
		w.sendError(os.NewSyscallError("CancelIo", err))
		w.deleteWatch(watch)
	}
	// Union of the directory mask and every per-name mask.
	mask := w.toWindowsFlags(watch.mask)
	for _, m := range watch.names {
		mask |= w.toWindowsFlags(m)
	}
	if mask == 0 {
		// Nothing left to watch: tear the whole entry down.
		err := windows.CloseHandle(watch.ino.handle)
		if err != nil {
			w.sendError(os.NewSyscallError("CloseHandle", err))
		}
		w.mu.Lock()
		delete(w.watches[watch.ino.volume], watch.ino.index)
		w.mu.Unlock()
		return nil
	}

	// We need to pass the array, rather than the slice.
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&watch.buf))
	rdErr := windows.ReadDirectoryChanges(watch.ino.handle,
		(*byte)(unsafe.Pointer(hdr.Data)), uint32(hdr.Len),
		watch.recurse, mask, nil, &watch.ov, 0)
	if rdErr != nil {
		err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
		if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
			// Watched directory was probably removed
			w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
			err = nil
		}
		w.deleteWatch(watch)
		// Re-enter with cleared flags so the mask==0 branch closes the
		// handle and drops the map entry.
		w.startRead(watch)
		return err
	}
	return nil
}
|
||||||
|
|
||||||
|
// readEvents reads from the I/O completion port, converts the
// received events into Event objects and sends them via the Events channel.
// Entry point to the I/O thread.
//
// A nil Overlapped result means the packet came from wakeupReader, i.e. there
// is a pending command on w.input or a shutdown request on w.done.
func (w *readDirChangesW) readEvents() {
	var (
		n   uint32
		key uintptr
		ov  *windows.Overlapped
	)
	runtime.LockOSThread()

	for {
		// This error is handled after the watch == nil check below.
		qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE)

		// watch.ov is the first field of watch, so the returned *Overlapped
		// is also a pointer to the owning watch.
		watch := (*watch)(unsafe.Pointer(ov))
		if watch == nil {
			// Wakeup packet: service a command or shut down.
			select {
			case ch := <-w.done:
				// Snapshot the watches under the lock, then tear each one
				// down (deleteWatch clears flags; startRead then closes the
				// handle and removes the entry).
				w.mu.Lock()
				var indexes []indexMap
				for _, index := range w.watches {
					indexes = append(indexes, index)
				}
				w.mu.Unlock()
				for _, index := range indexes {
					for _, watch := range index {
						w.deleteWatch(watch)
						w.startRead(watch)
					}
				}

				err := windows.CloseHandle(w.port)
				if err != nil {
					err = os.NewSyscallError("CloseHandle", err)
				}
				close(w.Events)
				close(w.Errors)
				ch <- err
				return
			case in := <-w.input:
				switch in.op {
				case opAddWatch:
					in.reply <- w.addWatch(in.path, uint64(in.flags), in.bufsize)
				case opRemoveWatch:
					in.reply <- w.remWatch(in.path)
				}
			default:
			}
			continue
		}

		switch qErr {
		case nil:
			// No error
		case windows.ERROR_MORE_DATA:
			if watch == nil {
				w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer"))
			} else {
				// The i/o succeeded but the buffer is full.
				// In theory we should be building up a full packet.
				// In practice we can get away with just carrying on.
				//
				// NOTE(review): unsafe.Sizeof(watch.buf) is the size of the
				// slice *header*, not len(watch.buf) — this looks wrong;
				// confirm against upstream fsnotify.
				n = uint32(unsafe.Sizeof(watch.buf))
			}
		case windows.ERROR_ACCESS_DENIED:
			// Watched directory was probably removed
			w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
			w.deleteWatch(watch)
			w.startRead(watch)
			continue
		case windows.ERROR_OPERATION_ABORTED:
			// CancelIo was called on this handle
			continue
		default:
			w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr))
			continue
		}

		// Walk the FILE_NOTIFY_INFORMATION records packed in watch.buf.
		var offset uint32
		for {
			if n == 0 {
				w.sendError(ErrEventOverflow)
				break
			}

			// Point "raw" to the event in the buffer
			raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))

			// Create a buf that is the size of the path name
			size := int(raw.FileNameLength / 2)
			var buf []uint16
			// TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973
			sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
			sh.Data = uintptr(unsafe.Pointer(&raw.FileName))
			sh.Len = size
			sh.Cap = size
			name := windows.UTF16ToString(buf)
			fullname := filepath.Join(watch.path, name)

			if debug {
				internal.Debug(fullname, raw.Action)
			}

			var mask uint64
			switch raw.Action {
			case windows.FILE_ACTION_REMOVED:
				mask = sysFSDELETESELF
			case windows.FILE_ACTION_MODIFIED:
				mask = sysFSMODIFY
			case windows.FILE_ACTION_RENAMED_OLD_NAME:
				// Remember the old name; the NEW_NAME record follows.
				watch.rename = name
			case windows.FILE_ACTION_RENAMED_NEW_NAME:
				// Update saved path of all sub-watches.
				old := filepath.Join(watch.path, watch.rename)
				w.mu.Lock()
				for _, watchMap := range w.watches {
					for _, ww := range watchMap {
						if strings.HasPrefix(ww.path, old) {
							ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old))
						}
					}
				}
				w.mu.Unlock()

				// Carry per-name flags over from the old name to the new one.
				if watch.names[watch.rename] != 0 {
					watch.names[name] |= watch.names[watch.rename]
					delete(watch.names, watch.rename)
					mask = sysFSMOVESELF
				}
			}

			if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME {
				w.sendEvent(fullname, "", watch.names[name]&mask)
			}
			if raw.Action == windows.FILE_ACTION_REMOVED {
				w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED)
				delete(watch.names, name)
			}

			if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
				w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action))
			} else {
				w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action))
			}

			if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
				w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask)
			}

			// Move to the next event in the buffer
			if raw.NextEntryOffset == 0 {
				break
			}
			offset += raw.NextEntryOffset

			// Error!
			if offset >= n {
				//lint:ignore ST1005 Windows should be capitalized
				w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed"))
				break
			}
		}

		// Re-arm the read for the next batch.
		if err := w.startRead(watch); err != nil {
			w.sendError(err)
		}
	}
}
|
||||||
|
|
||||||
|
func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 {
|
||||||
|
var m uint32
|
||||||
|
if mask&sysFSMODIFY != 0 {
|
||||||
|
m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE
|
||||||
|
}
|
||||||
|
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
|
||||||
|
m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME
|
||||||
|
}
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 {
|
||||||
|
switch action {
|
||||||
|
case windows.FILE_ACTION_ADDED:
|
||||||
|
return sysFSCREATE
|
||||||
|
case windows.FILE_ACTION_REMOVED:
|
||||||
|
return sysFSDELETE
|
||||||
|
case windows.FILE_ACTION_MODIFIED:
|
||||||
|
return sysFSMODIFY
|
||||||
|
case windows.FILE_ACTION_RENAMED_OLD_NAME:
|
||||||
|
return sysFSMOVEDFROM
|
||||||
|
case windows.FILE_ACTION_RENAMED_NEW_NAME:
|
||||||
|
return sysFSMOVEDTO
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *readDirChangesW) xSupports(op Op) bool {
|
||||||
|
if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
|
||||||
|
op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
496
vendor/github.com/fsnotify/fsnotify/fsnotify.go
generated
vendored
Normal file
496
vendor/github.com/fsnotify/fsnotify/fsnotify.go
generated
vendored
Normal file
@@ -0,0 +1,496 @@
|
|||||||
|
// Package fsnotify provides a cross-platform interface for file system
|
||||||
|
// notifications.
|
||||||
|
//
|
||||||
|
// Currently supported systems:
|
||||||
|
//
|
||||||
|
// - Linux via inotify
|
||||||
|
// - BSD, macOS via kqueue
|
||||||
|
// - Windows via ReadDirectoryChangesW
|
||||||
|
// - illumos via FEN
|
||||||
|
//
|
||||||
|
// # FSNOTIFY_DEBUG
|
||||||
|
//
|
||||||
|
// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to
|
||||||
|
// stderr. This can be useful to track down some problems, especially in cases
|
||||||
|
// where fsnotify is used as an indirect dependency.
|
||||||
|
//
|
||||||
|
// Every event will be printed as soon as there's something useful to print,
|
||||||
|
// with as little processing from fsnotify.
|
||||||
|
//
|
||||||
|
// Example output:
|
||||||
|
//
|
||||||
|
// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1"
|
||||||
|
// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1"
|
||||||
|
// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1"
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Watcher watches a set of paths, delivering events on a channel.
|
||||||
|
//
|
||||||
|
// A watcher should not be copied (e.g. pass it by pointer, rather than by
|
||||||
|
// value).
|
||||||
|
//
|
||||||
|
// # Linux notes
|
||||||
|
//
|
||||||
|
// When a file is removed a Remove event won't be emitted until all file
|
||||||
|
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||||
|
//
|
||||||
|
// fp := os.Open("file")
|
||||||
|
// os.Remove("file") // Triggers Chmod
|
||||||
|
// fp.Close() // Triggers Remove
|
||||||
|
//
|
||||||
|
// This is the event that inotify sends, so not much can be changed about this.
|
||||||
|
//
|
||||||
|
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
|
||||||
|
// for the number of watches per user, and fs.inotify.max_user_instances
|
||||||
|
// specifies the maximum number of inotify instances per user. Every Watcher you
|
||||||
|
// create is an "instance", and every path you add is a "watch".
|
||||||
|
//
|
||||||
|
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
|
||||||
|
// /proc/sys/fs/inotify/max_user_instances
|
||||||
|
//
|
||||||
|
// To increase them you can use sysctl or write the value to the /proc file:
|
||||||
|
//
|
||||||
|
// # Default values on Linux 5.18
|
||||||
|
// sysctl fs.inotify.max_user_watches=124983
|
||||||
|
// sysctl fs.inotify.max_user_instances=128
|
||||||
|
//
|
||||||
|
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||||
|
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||||
|
// your distro's documentation):
|
||||||
|
//
|
||||||
|
// fs.inotify.max_user_watches=124983
|
||||||
|
// fs.inotify.max_user_instances=128
|
||||||
|
//
|
||||||
|
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||||
|
// files" error.
|
||||||
|
//
|
||||||
|
// # kqueue notes (macOS, BSD)
|
||||||
|
//
|
||||||
|
// kqueue requires opening a file descriptor for every file that's being watched;
|
||||||
|
// so if you're watching a directory with five files then that's six file
|
||||||
|
// descriptors. You will run in to your system's "max open files" limit faster on
|
||||||
|
// these platforms.
|
||||||
|
//
|
||||||
|
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
|
||||||
|
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||||
|
// systems.
|
||||||
|
//
|
||||||
|
// # Windows notes
|
||||||
|
//
|
||||||
|
// Paths can be added as "C:\\path\\to\\dir", but forward slashes
|
||||||
|
// ("C:/path/to/dir") will also work.
|
||||||
|
//
|
||||||
|
// When a watched directory is removed it will always send an event for the
|
||||||
|
// directory itself, but may not send events for all files in that directory.
|
||||||
|
// Sometimes it will send events for all files, sometimes it will send no
|
||||||
|
// events, and often only for some files.
|
||||||
|
//
|
||||||
|
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
|
||||||
|
// value that is guaranteed to work with SMB filesystems. If you have many
|
||||||
|
// events in quick succession this may not be enough, and you will have to use
|
||||||
|
// [WithBufferSize] to increase the value.
|
||||||
|
type Watcher struct {
|
||||||
|
b backend
|
||||||
|
|
||||||
|
// Events sends the filesystem change events.
|
||||||
|
//
|
||||||
|
// fsnotify can send the following events; a "path" here can refer to a
|
||||||
|
// file, directory, symbolic link, or special file like a FIFO.
|
||||||
|
//
|
||||||
|
// fsnotify.Create A new path was created; this may be followed by one
|
||||||
|
// or more Write events if data also gets written to a
|
||||||
|
// file.
|
||||||
|
//
|
||||||
|
// fsnotify.Remove A path was removed.
|
||||||
|
//
|
||||||
|
// fsnotify.Rename A path was renamed. A rename is always sent with the
|
||||||
|
// old path as Event.Name, and a Create event will be
|
||||||
|
// sent with the new name. Renames are only sent for
|
||||||
|
// paths that are currently watched; e.g. moving an
|
||||||
|
// unmonitored file into a monitored directory will
|
||||||
|
// show up as just a Create. Similarly, renaming a file
|
||||||
|
// to outside a monitored directory will show up as
|
||||||
|
// only a Rename.
|
||||||
|
//
|
||||||
|
// fsnotify.Write A file or named pipe was written to. A Truncate will
|
||||||
|
// also trigger a Write. A single "write action"
|
||||||
|
// initiated by the user may show up as one or multiple
|
||||||
|
// writes, depending on when the system syncs things to
|
||||||
|
// disk. For example when compiling a large Go program
|
||||||
|
// you may get hundreds of Write events, and you may
|
||||||
|
// want to wait until you've stopped receiving them
|
||||||
|
// (see the dedup example in cmd/fsnotify).
|
||||||
|
//
|
||||||
|
// Some systems may send Write event for directories
|
||||||
|
// when the directory content changes.
|
||||||
|
//
|
||||||
|
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||||
|
// when a file is removed (or more accurately, when a
|
||||||
|
// link to an inode is removed). On kqueue it's sent
|
||||||
|
// when a file is truncated. On Windows it's never
|
||||||
|
// sent.
|
||||||
|
Events chan Event
|
||||||
|
|
||||||
|
// Errors sends any errors.
|
||||||
|
Errors chan error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Event represents a file system notification.
type Event struct {
	// Path to the file or directory.
	//
	// Paths are relative to the input; for example with Add("dir") the Name
	// will be set to "dir/file" if you create that file, but if you use
	// Add("/path/to/dir") it will be "/path/to/dir/file".
	Name string

	// File operation that triggered the event.
	//
	// This is a bitmask and some systems may send multiple operations at once.
	// Use the Event.Has() method instead of comparing with ==.
	Op Op

	// Create events will have this set to the old path if it's a rename. This
	// only works when both the source and destination are watched. It's not
	// reliable when watching individual files, only directories.
	//
	// For example "mv /tmp/file /tmp/rename" will emit:
	//
	//   Event{Op: Rename, Name: "/tmp/file"}
	//   Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"}
	//
	// NOTE(review): the example says "RenamedFrom" but the field here is
	// unexported (renamedFrom) — presumably exposed via an accessor elsewhere
	// in the package; confirm.
	renamedFrom string
}
|
||||||
|
|
||||||
|
// Op describes a set of file operations.
|
||||||
|
type Op uint32
|
||||||
|
|
||||||
|
// The operations fsnotify can trigger; see the documentation on [Watcher] for a
|
||||||
|
// full description, and check them with [Event.Has].
|
||||||
|
const (
|
||||||
|
// A new pathname was created.
|
||||||
|
Create Op = 1 << iota
|
||||||
|
|
||||||
|
// The pathname was written to; this does *not* mean the write has finished,
|
||||||
|
// and a write can be followed by more writes.
|
||||||
|
Write
|
||||||
|
|
||||||
|
// The path was removed; any watches on it will be removed. Some "remove"
|
||||||
|
// operations may trigger a Rename if the file is actually moved (for
|
||||||
|
// example "remove to trash" is often a rename).
|
||||||
|
Remove
|
||||||
|
|
||||||
|
// The path was renamed to something else; any watches on it will be
|
||||||
|
// removed.
|
||||||
|
Rename
|
||||||
|
|
||||||
|
// File attributes were changed.
|
||||||
|
//
|
||||||
|
// It's generally not recommended to take action on this event, as it may
|
||||||
|
// get triggered very frequently by some software. For example, Spotlight
|
||||||
|
// indexing on macOS, anti-virus software, backup software, etc.
|
||||||
|
Chmod
|
||||||
|
|
||||||
|
// File descriptor was opened.
|
||||||
|
//
|
||||||
|
// Only works on Linux and FreeBSD.
|
||||||
|
xUnportableOpen
|
||||||
|
|
||||||
|
// File was read from.
|
||||||
|
//
|
||||||
|
// Only works on Linux and FreeBSD.
|
||||||
|
xUnportableRead
|
||||||
|
|
||||||
|
// File opened for writing was closed.
|
||||||
|
//
|
||||||
|
// Only works on Linux and FreeBSD.
|
||||||
|
//
|
||||||
|
// The advantage of using this over Write is that it's more reliable than
|
||||||
|
// waiting for Write events to stop. It's also faster (if you're not
|
||||||
|
// listening to Write events): copying a file of a few GB can easily
|
||||||
|
// generate tens of thousands of Write events in a short span of time.
|
||||||
|
xUnportableCloseWrite
|
||||||
|
|
||||||
|
// File opened for reading was closed.
|
||||||
|
//
|
||||||
|
// Only works on Linux and FreeBSD.
|
||||||
|
xUnportableCloseRead
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrNonExistentWatch is used when Remove() is called on a path that's not
|
||||||
|
// added.
|
||||||
|
ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch")
|
||||||
|
|
||||||
|
// ErrClosed is used when trying to operate on a closed Watcher.
|
||||||
|
ErrClosed = errors.New("fsnotify: watcher already closed")
|
||||||
|
|
||||||
|
// ErrEventOverflow is reported from the Errors channel when there are too
|
||||||
|
// many events:
|
||||||
|
//
|
||||||
|
// - inotify: inotify returns IN_Q_OVERFLOW – because there are too
|
||||||
|
// many queued events (the fs.inotify.max_queued_events
|
||||||
|
// sysctl can be used to increase this).
|
||||||
|
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
|
||||||
|
// - kqueue, fen: Not used.
|
||||||
|
ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow")
|
||||||
|
|
||||||
|
// ErrUnsupported is returned by AddWith() when WithOps() specified an
|
||||||
|
// Unportable event that's not supported on this platform.
|
||||||
|
//lint:ignore ST1012 not relevant
|
||||||
|
xErrUnsupported = errors.New("fsnotify: not supported with this backend")
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewWatcher creates a new Watcher.
|
||||||
|
func NewWatcher() (*Watcher, error) {
|
||||||
|
ev, errs := make(chan Event, defaultBufferSize), make(chan error)
|
||||||
|
b, err := newBackend(ev, errs)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &Watcher{b: b, Events: ev, Errors: errs}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
|
||||||
|
// channel.
|
||||||
|
//
|
||||||
|
// The main use case for this is situations with a very large number of events
|
||||||
|
// where the kernel buffer size can't be increased (e.g. due to lack of
|
||||||
|
// permissions). An unbuffered Watcher will perform better for almost all use
|
||||||
|
// cases, and whenever possible you will be better off increasing the kernel
|
||||||
|
// buffers instead of adding a large userspace buffer.
|
||||||
|
func NewBufferedWatcher(sz uint) (*Watcher, error) {
|
||||||
|
ev, errs := make(chan Event, sz), make(chan error)
|
||||||
|
b, err := newBackend(ev, errs)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &Watcher{b: b, Events: ev, Errors: errs}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add starts monitoring the path for changes.
|
||||||
|
//
|
||||||
|
// A path can only be watched once; watching it more than once is a no-op and will
|
||||||
|
// not return an error. Paths that do not yet exist on the filesystem cannot be
|
||||||
|
// watched.
|
||||||
|
//
|
||||||
|
// A watch will be automatically removed if the watched path is deleted or
|
||||||
|
// renamed. The exception is the Windows backend, which doesn't remove the
|
||||||
|
// watcher on renames.
|
||||||
|
//
|
||||||
|
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||||
|
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||||
|
//
|
||||||
|
// Returns [ErrClosed] if [Watcher.Close] was called.
|
||||||
|
//
|
||||||
|
// See [Watcher.AddWith] for a version that allows adding options.
|
||||||
|
//
|
||||||
|
// # Watching directories
|
||||||
|
//
|
||||||
|
// All files in a directory are monitored, including new files that are created
|
||||||
|
// after the watcher is started. Subdirectories are not watched (i.e. it's
|
||||||
|
// non-recursive).
|
||||||
|
//
|
||||||
|
// # Watching files
|
||||||
|
//
|
||||||
|
// Watching individual files (rather than directories) is generally not
|
||||||
|
// recommended as many programs (especially editors) update files atomically: it
|
||||||
|
// will write to a temporary file which is then moved to destination,
|
||||||
|
// overwriting the original (or some variant thereof). The watcher on the
|
||||||
|
// original file is now lost, as that no longer exists.
|
||||||
|
//
|
||||||
|
// The upshot of this is that a power failure or crash won't leave a
|
||||||
|
// half-written file.
|
||||||
|
//
|
||||||
|
// Watch the parent directory and use Event.Name to filter out files you're not
|
||||||
|
// interested in. There is an example of this in cmd/fsnotify/file.go.
|
||||||
|
func (w *Watcher) Add(path string) error { return w.b.Add(path) }
|
||||||
|
|
||||||
|
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
|
||||||
|
// the defaults described below are used.
|
||||||
|
//
|
||||||
|
// Possible options are:
|
||||||
|
//
|
||||||
|
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
|
||||||
|
// other platforms. The default is 64K (65536 bytes).
|
||||||
|
func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) }
|
||||||
|
|
||||||
|
// Remove stops monitoring the path for changes.
|
||||||
|
//
|
||||||
|
// Directories are always removed non-recursively. For example, if you added
|
||||||
|
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||||
|
//
|
||||||
|
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||||
|
//
|
||||||
|
// Returns nil if [Watcher.Close] was called.
|
||||||
|
func (w *Watcher) Remove(path string) error { return w.b.Remove(path) }
|
||||||
|
|
||||||
|
// Close removes all watches and closes the Events channel.
|
||||||
|
func (w *Watcher) Close() error { return w.b.Close() }
|
||||||
|
|
||||||
|
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
|
||||||
|
// yet removed).
|
||||||
|
//
|
||||||
|
// The order is undefined, and may differ per call. Returns nil if
|
||||||
|
// [Watcher.Close] was called.
|
||||||
|
func (w *Watcher) WatchList() []string { return w.b.WatchList() }
|
||||||
|
|
||||||
|
// Supports reports if all the listed operations are supported by this platform.
|
||||||
|
//
|
||||||
|
// Create, Write, Remove, Rename, and Chmod are always supported. It can only
|
||||||
|
// return false for an Op starting with Unportable.
|
||||||
|
func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) }
|
||||||
|
|
||||||
|
func (o Op) String() string {
|
||||||
|
var b strings.Builder
|
||||||
|
if o.Has(Create) {
|
||||||
|
b.WriteString("|CREATE")
|
||||||
|
}
|
||||||
|
if o.Has(Remove) {
|
||||||
|
b.WriteString("|REMOVE")
|
||||||
|
}
|
||||||
|
if o.Has(Write) {
|
||||||
|
b.WriteString("|WRITE")
|
||||||
|
}
|
||||||
|
if o.Has(xUnportableOpen) {
|
||||||
|
b.WriteString("|OPEN")
|
||||||
|
}
|
||||||
|
if o.Has(xUnportableRead) {
|
||||||
|
b.WriteString("|READ")
|
||||||
|
}
|
||||||
|
if o.Has(xUnportableCloseWrite) {
|
||||||
|
b.WriteString("|CLOSE_WRITE")
|
||||||
|
}
|
||||||
|
if o.Has(xUnportableCloseRead) {
|
||||||
|
b.WriteString("|CLOSE_READ")
|
||||||
|
}
|
||||||
|
if o.Has(Rename) {
|
||||||
|
b.WriteString("|RENAME")
|
||||||
|
}
|
||||||
|
if o.Has(Chmod) {
|
||||||
|
b.WriteString("|CHMOD")
|
||||||
|
}
|
||||||
|
if b.Len() == 0 {
|
||||||
|
return "[no events]"
|
||||||
|
}
|
||||||
|
return b.String()[1:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Has reports if this operation has the given operation.
|
||||||
|
func (o Op) Has(h Op) bool { return o&h != 0 }
|
||||||
|
|
||||||
|
// Has reports if this event has the given operation.
|
||||||
|
func (e Event) Has(op Op) bool { return e.Op.Has(op) }
|
||||||
|
|
||||||
|
// String returns a string representation of the event with their path.
|
||||||
|
func (e Event) String() string {
|
||||||
|
if e.renamedFrom != "" {
|
||||||
|
return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
type (
|
||||||
|
backend interface {
|
||||||
|
Add(string) error
|
||||||
|
AddWith(string, ...addOpt) error
|
||||||
|
Remove(string) error
|
||||||
|
WatchList() []string
|
||||||
|
Close() error
|
||||||
|
xSupports(Op) bool
|
||||||
|
}
|
||||||
|
addOpt func(opt *withOpts)
|
||||||
|
withOpts struct {
|
||||||
|
bufsize int
|
||||||
|
op Op
|
||||||
|
noFollow bool
|
||||||
|
sendCreate bool
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
var debug = func() bool {
|
||||||
|
// Check for exactly "1" (rather than mere existence) so we can add
|
||||||
|
// options/flags in the future. I don't know if we ever want that, but it's
|
||||||
|
// nice to leave the option open.
|
||||||
|
return os.Getenv("FSNOTIFY_DEBUG") == "1"
|
||||||
|
}()
|
||||||
|
|
||||||
|
var defaultOpts = withOpts{
|
||||||
|
bufsize: 65536, // 64K
|
||||||
|
op: Create | Write | Remove | Rename | Chmod,
|
||||||
|
}
|
||||||
|
|
||||||
|
func getOptions(opts ...addOpt) withOpts {
|
||||||
|
with := defaultOpts
|
||||||
|
for _, o := range opts {
|
||||||
|
if o != nil {
|
||||||
|
o(&with)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return with
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithBufferSize sets the [ReadDirectoryChangesW] buffer size.
|
||||||
|
//
|
||||||
|
// This only has effect on Windows systems, and is a no-op for other backends.
|
||||||
|
//
|
||||||
|
// The default value is 64K (65536 bytes) which is the highest value that works
|
||||||
|
// on all filesystems and should be enough for most applications, but if you
|
||||||
|
// have a large burst of events it may not be enough. You can increase it if
|
||||||
|
// you're hitting "queue or buffer overflow" errors ([ErrEventOverflow]).
|
||||||
|
//
|
||||||
|
// [ReadDirectoryChangesW]: https://learn.microsoft.com/en-gb/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
|
||||||
|
func WithBufferSize(bytes int) addOpt {
|
||||||
|
return func(opt *withOpts) { opt.bufsize = bytes }
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithOps sets which operations to listen for. The default is [Create],
|
||||||
|
// [Write], [Remove], [Rename], and [Chmod].
|
||||||
|
//
|
||||||
|
// Excluding operations you're not interested in can save quite a bit of CPU
|
||||||
|
// time; in some use cases there may be hundreds of thousands of useless Write
|
||||||
|
// or Chmod operations per second.
|
||||||
|
//
|
||||||
|
// This can also be used to add unportable operations not supported by all
|
||||||
|
// platforms; unportable operations all start with "Unportable":
|
||||||
|
// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and
|
||||||
|
// [UnportableCloseRead].
|
||||||
|
//
|
||||||
|
// AddWith returns an error when using an unportable operation that's not
|
||||||
|
// supported. Use [Watcher.Support] to check for support.
|
||||||
|
func withOps(op Op) addOpt {
|
||||||
|
return func(opt *withOpts) { opt.op = op }
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithNoFollow disables following symlinks, so the symlinks themselves are
|
||||||
|
// watched.
|
||||||
|
func withNoFollow() addOpt {
|
||||||
|
return func(opt *withOpts) { opt.noFollow = true }
|
||||||
|
}
|
||||||
|
|
||||||
|
// "Internal" option for recursive watches on inotify.
|
||||||
|
func withCreate() addOpt {
|
||||||
|
return func(opt *withOpts) { opt.sendCreate = true }
|
||||||
|
}
|
||||||
|
|
||||||
|
var enableRecurse = false
|
||||||
|
|
||||||
|
// Check if this path is recursive (ends with "/..." or "\..."), and return the
|
||||||
|
// path with the /... stripped.
|
||||||
|
func recursivePath(path string) (string, bool) {
|
||||||
|
path = filepath.Clean(path)
|
||||||
|
if !enableRecurse { // Only enabled in tests for now.
|
||||||
|
return path, false
|
||||||
|
}
|
||||||
|
if filepath.Base(path) == "..." {
|
||||||
|
return filepath.Dir(path), true
|
||||||
|
}
|
||||||
|
return path, false
|
||||||
|
}
|
||||||
39
vendor/github.com/fsnotify/fsnotify/internal/darwin.go
generated
vendored
Normal file
39
vendor/github.com/fsnotify/fsnotify/internal/darwin.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
//go:build darwin
|
||||||
|
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrSyscallEACCES = syscall.EACCES
|
||||||
|
ErrUnixEACCES = unix.EACCES
|
||||||
|
)
|
||||||
|
|
||||||
|
var maxfiles uint64
|
||||||
|
|
||||||
|
func SetRlimit() {
|
||||||
|
// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
|
||||||
|
var l syscall.Rlimit
|
||||||
|
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
|
||||||
|
if err == nil && l.Cur != l.Max {
|
||||||
|
l.Cur = l.Max
|
||||||
|
syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
|
||||||
|
}
|
||||||
|
maxfiles = l.Cur
|
||||||
|
|
||||||
|
if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles {
|
||||||
|
maxfiles = uint64(n)
|
||||||
|
}
|
||||||
|
|
||||||
|
if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles {
|
||||||
|
maxfiles = uint64(n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Maxfiles() uint64 { return maxfiles }
|
||||||
|
func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) }
|
||||||
|
func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) }
|
||||||
57
vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go
generated
vendored
Normal file
57
vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go
generated
vendored
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import "golang.org/x/sys/unix"
|
||||||
|
|
||||||
|
var names = []struct {
|
||||||
|
n string
|
||||||
|
m uint32
|
||||||
|
}{
|
||||||
|
{"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE},
|
||||||
|
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
|
||||||
|
{"NOTE_BACKGROUND", unix.NOTE_BACKGROUND},
|
||||||
|
{"NOTE_CHILD", unix.NOTE_CHILD},
|
||||||
|
{"NOTE_CRITICAL", unix.NOTE_CRITICAL},
|
||||||
|
{"NOTE_DELETE", unix.NOTE_DELETE},
|
||||||
|
{"NOTE_EXEC", unix.NOTE_EXEC},
|
||||||
|
{"NOTE_EXIT", unix.NOTE_EXIT},
|
||||||
|
{"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS},
|
||||||
|
{"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR},
|
||||||
|
{"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL},
|
||||||
|
{"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL},
|
||||||
|
{"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK},
|
||||||
|
{"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY},
|
||||||
|
{"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED},
|
||||||
|
{"NOTE_EXTEND", unix.NOTE_EXTEND},
|
||||||
|
{"NOTE_FFAND", unix.NOTE_FFAND},
|
||||||
|
{"NOTE_FFCOPY", unix.NOTE_FFCOPY},
|
||||||
|
{"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
|
||||||
|
{"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
|
||||||
|
{"NOTE_FFNOP", unix.NOTE_FFNOP},
|
||||||
|
{"NOTE_FFOR", unix.NOTE_FFOR},
|
||||||
|
{"NOTE_FORK", unix.NOTE_FORK},
|
||||||
|
{"NOTE_FUNLOCK", unix.NOTE_FUNLOCK},
|
||||||
|
{"NOTE_LEEWAY", unix.NOTE_LEEWAY},
|
||||||
|
{"NOTE_LINK", unix.NOTE_LINK},
|
||||||
|
{"NOTE_LOWAT", unix.NOTE_LOWAT},
|
||||||
|
{"NOTE_MACHTIME", unix.NOTE_MACHTIME},
|
||||||
|
{"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME},
|
||||||
|
{"NOTE_NONE", unix.NOTE_NONE},
|
||||||
|
{"NOTE_NSECONDS", unix.NOTE_NSECONDS},
|
||||||
|
{"NOTE_OOB", unix.NOTE_OOB},
|
||||||
|
//{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!)
|
||||||
|
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
|
||||||
|
{"NOTE_REAP", unix.NOTE_REAP},
|
||||||
|
{"NOTE_RENAME", unix.NOTE_RENAME},
|
||||||
|
{"NOTE_REVOKE", unix.NOTE_REVOKE},
|
||||||
|
{"NOTE_SECONDS", unix.NOTE_SECONDS},
|
||||||
|
{"NOTE_SIGNAL", unix.NOTE_SIGNAL},
|
||||||
|
{"NOTE_TRACK", unix.NOTE_TRACK},
|
||||||
|
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
|
||||||
|
{"NOTE_TRIGGER", unix.NOTE_TRIGGER},
|
||||||
|
{"NOTE_USECONDS", unix.NOTE_USECONDS},
|
||||||
|
{"NOTE_VM_ERROR", unix.NOTE_VM_ERROR},
|
||||||
|
{"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE},
|
||||||
|
{"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE},
|
||||||
|
{"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE},
|
||||||
|
{"NOTE_WRITE", unix.NOTE_WRITE},
|
||||||
|
}
|
||||||
33
vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go
generated
vendored
Normal file
33
vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go
generated
vendored
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import "golang.org/x/sys/unix"
|
||||||
|
|
||||||
|
var names = []struct {
|
||||||
|
n string
|
||||||
|
m uint32
|
||||||
|
}{
|
||||||
|
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
|
||||||
|
{"NOTE_CHILD", unix.NOTE_CHILD},
|
||||||
|
{"NOTE_DELETE", unix.NOTE_DELETE},
|
||||||
|
{"NOTE_EXEC", unix.NOTE_EXEC},
|
||||||
|
{"NOTE_EXIT", unix.NOTE_EXIT},
|
||||||
|
{"NOTE_EXTEND", unix.NOTE_EXTEND},
|
||||||
|
{"NOTE_FFAND", unix.NOTE_FFAND},
|
||||||
|
{"NOTE_FFCOPY", unix.NOTE_FFCOPY},
|
||||||
|
{"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
|
||||||
|
{"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
|
||||||
|
{"NOTE_FFNOP", unix.NOTE_FFNOP},
|
||||||
|
{"NOTE_FFOR", unix.NOTE_FFOR},
|
||||||
|
{"NOTE_FORK", unix.NOTE_FORK},
|
||||||
|
{"NOTE_LINK", unix.NOTE_LINK},
|
||||||
|
{"NOTE_LOWAT", unix.NOTE_LOWAT},
|
||||||
|
{"NOTE_OOB", unix.NOTE_OOB},
|
||||||
|
{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
|
||||||
|
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
|
||||||
|
{"NOTE_RENAME", unix.NOTE_RENAME},
|
||||||
|
{"NOTE_REVOKE", unix.NOTE_REVOKE},
|
||||||
|
{"NOTE_TRACK", unix.NOTE_TRACK},
|
||||||
|
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
|
||||||
|
{"NOTE_TRIGGER", unix.NOTE_TRIGGER},
|
||||||
|
{"NOTE_WRITE", unix.NOTE_WRITE},
|
||||||
|
}
|
||||||
42
vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go
generated
vendored
Normal file
42
vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import "golang.org/x/sys/unix"
|
||||||
|
|
||||||
|
var names = []struct {
|
||||||
|
n string
|
||||||
|
m uint32
|
||||||
|
}{
|
||||||
|
{"NOTE_ABSTIME", unix.NOTE_ABSTIME},
|
||||||
|
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
|
||||||
|
{"NOTE_CHILD", unix.NOTE_CHILD},
|
||||||
|
{"NOTE_CLOSE", unix.NOTE_CLOSE},
|
||||||
|
{"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE},
|
||||||
|
{"NOTE_DELETE", unix.NOTE_DELETE},
|
||||||
|
{"NOTE_EXEC", unix.NOTE_EXEC},
|
||||||
|
{"NOTE_EXIT", unix.NOTE_EXIT},
|
||||||
|
{"NOTE_EXTEND", unix.NOTE_EXTEND},
|
||||||
|
{"NOTE_FFAND", unix.NOTE_FFAND},
|
||||||
|
{"NOTE_FFCOPY", unix.NOTE_FFCOPY},
|
||||||
|
{"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
|
||||||
|
{"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
|
||||||
|
{"NOTE_FFNOP", unix.NOTE_FFNOP},
|
||||||
|
{"NOTE_FFOR", unix.NOTE_FFOR},
|
||||||
|
{"NOTE_FILE_POLL", unix.NOTE_FILE_POLL},
|
||||||
|
{"NOTE_FORK", unix.NOTE_FORK},
|
||||||
|
{"NOTE_LINK", unix.NOTE_LINK},
|
||||||
|
{"NOTE_LOWAT", unix.NOTE_LOWAT},
|
||||||
|
{"NOTE_MSECONDS", unix.NOTE_MSECONDS},
|
||||||
|
{"NOTE_NSECONDS", unix.NOTE_NSECONDS},
|
||||||
|
{"NOTE_OPEN", unix.NOTE_OPEN},
|
||||||
|
{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
|
||||||
|
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
|
||||||
|
{"NOTE_READ", unix.NOTE_READ},
|
||||||
|
{"NOTE_RENAME", unix.NOTE_RENAME},
|
||||||
|
{"NOTE_REVOKE", unix.NOTE_REVOKE},
|
||||||
|
{"NOTE_SECONDS", unix.NOTE_SECONDS},
|
||||||
|
{"NOTE_TRACK", unix.NOTE_TRACK},
|
||||||
|
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
|
||||||
|
{"NOTE_TRIGGER", unix.NOTE_TRIGGER},
|
||||||
|
{"NOTE_USECONDS", unix.NOTE_USECONDS},
|
||||||
|
{"NOTE_WRITE", unix.NOTE_WRITE},
|
||||||
|
}
|
||||||
32
vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go
generated
vendored
Normal file
32
vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go
generated
vendored
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
|
||||||
|
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Debug(name string, kevent *unix.Kevent_t) {
|
||||||
|
mask := uint32(kevent.Fflags)
|
||||||
|
|
||||||
|
var (
|
||||||
|
l []string
|
||||||
|
unknown = mask
|
||||||
|
)
|
||||||
|
for _, n := range names {
|
||||||
|
if mask&n.m == n.m {
|
||||||
|
l = append(l, n.n)
|
||||||
|
unknown ^= n.m
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if unknown > 0 {
|
||||||
|
l = append(l, fmt.Sprintf("0x%x", unknown))
|
||||||
|
}
|
||||||
|
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n",
|
||||||
|
time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name)
|
||||||
|
}
|
||||||
56
vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go
generated
vendored
Normal file
56
vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go
generated
vendored
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Debug(name string, mask, cookie uint32) {
|
||||||
|
names := []struct {
|
||||||
|
n string
|
||||||
|
m uint32
|
||||||
|
}{
|
||||||
|
{"IN_ACCESS", unix.IN_ACCESS},
|
||||||
|
{"IN_ATTRIB", unix.IN_ATTRIB},
|
||||||
|
{"IN_CLOSE", unix.IN_CLOSE},
|
||||||
|
{"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE},
|
||||||
|
{"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE},
|
||||||
|
{"IN_CREATE", unix.IN_CREATE},
|
||||||
|
{"IN_DELETE", unix.IN_DELETE},
|
||||||
|
{"IN_DELETE_SELF", unix.IN_DELETE_SELF},
|
||||||
|
{"IN_IGNORED", unix.IN_IGNORED},
|
||||||
|
{"IN_ISDIR", unix.IN_ISDIR},
|
||||||
|
{"IN_MODIFY", unix.IN_MODIFY},
|
||||||
|
{"IN_MOVE", unix.IN_MOVE},
|
||||||
|
{"IN_MOVED_FROM", unix.IN_MOVED_FROM},
|
||||||
|
{"IN_MOVED_TO", unix.IN_MOVED_TO},
|
||||||
|
{"IN_MOVE_SELF", unix.IN_MOVE_SELF},
|
||||||
|
{"IN_OPEN", unix.IN_OPEN},
|
||||||
|
{"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW},
|
||||||
|
{"IN_UNMOUNT", unix.IN_UNMOUNT},
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
l []string
|
||||||
|
unknown = mask
|
||||||
|
)
|
||||||
|
for _, n := range names {
|
||||||
|
if mask&n.m == n.m {
|
||||||
|
l = append(l, n.n)
|
||||||
|
unknown ^= n.m
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if unknown > 0 {
|
||||||
|
l = append(l, fmt.Sprintf("0x%x", unknown))
|
||||||
|
}
|
||||||
|
var c string
|
||||||
|
if cookie > 0 {
|
||||||
|
c = fmt.Sprintf("(cookie: %d) ", cookie)
|
||||||
|
}
|
||||||
|
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n",
|
||||||
|
time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name)
|
||||||
|
}
|
||||||
25
vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go
generated
vendored
Normal file
25
vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go
generated
vendored
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import "golang.org/x/sys/unix"
|
||||||
|
|
||||||
|
var names = []struct {
|
||||||
|
n string
|
||||||
|
m uint32
|
||||||
|
}{
|
||||||
|
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
|
||||||
|
{"NOTE_CHILD", unix.NOTE_CHILD},
|
||||||
|
{"NOTE_DELETE", unix.NOTE_DELETE},
|
||||||
|
{"NOTE_EXEC", unix.NOTE_EXEC},
|
||||||
|
{"NOTE_EXIT", unix.NOTE_EXIT},
|
||||||
|
{"NOTE_EXTEND", unix.NOTE_EXTEND},
|
||||||
|
{"NOTE_FORK", unix.NOTE_FORK},
|
||||||
|
{"NOTE_LINK", unix.NOTE_LINK},
|
||||||
|
{"NOTE_LOWAT", unix.NOTE_LOWAT},
|
||||||
|
{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
|
||||||
|
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
|
||||||
|
{"NOTE_RENAME", unix.NOTE_RENAME},
|
||||||
|
{"NOTE_REVOKE", unix.NOTE_REVOKE},
|
||||||
|
{"NOTE_TRACK", unix.NOTE_TRACK},
|
||||||
|
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
|
||||||
|
{"NOTE_WRITE", unix.NOTE_WRITE},
|
||||||
|
}
|
||||||
28
vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go
generated
vendored
Normal file
28
vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import "golang.org/x/sys/unix"
|
||||||
|
|
||||||
|
var names = []struct {
|
||||||
|
n string
|
||||||
|
m uint32
|
||||||
|
}{
|
||||||
|
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
|
||||||
|
// {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386?
|
||||||
|
{"NOTE_CHILD", unix.NOTE_CHILD},
|
||||||
|
{"NOTE_DELETE", unix.NOTE_DELETE},
|
||||||
|
{"NOTE_EOF", unix.NOTE_EOF},
|
||||||
|
{"NOTE_EXEC", unix.NOTE_EXEC},
|
||||||
|
{"NOTE_EXIT", unix.NOTE_EXIT},
|
||||||
|
{"NOTE_EXTEND", unix.NOTE_EXTEND},
|
||||||
|
{"NOTE_FORK", unix.NOTE_FORK},
|
||||||
|
{"NOTE_LINK", unix.NOTE_LINK},
|
||||||
|
{"NOTE_LOWAT", unix.NOTE_LOWAT},
|
||||||
|
{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
|
||||||
|
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
|
||||||
|
{"NOTE_RENAME", unix.NOTE_RENAME},
|
||||||
|
{"NOTE_REVOKE", unix.NOTE_REVOKE},
|
||||||
|
{"NOTE_TRACK", unix.NOTE_TRACK},
|
||||||
|
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
|
||||||
|
{"NOTE_TRUNCATE", unix.NOTE_TRUNCATE},
|
||||||
|
{"NOTE_WRITE", unix.NOTE_WRITE},
|
||||||
|
}
|
||||||
45
vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go
generated
vendored
Normal file
45
vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Debug(name string, mask int32) {
|
||||||
|
names := []struct {
|
||||||
|
n string
|
||||||
|
m int32
|
||||||
|
}{
|
||||||
|
{"FILE_ACCESS", unix.FILE_ACCESS},
|
||||||
|
{"FILE_MODIFIED", unix.FILE_MODIFIED},
|
||||||
|
{"FILE_ATTRIB", unix.FILE_ATTRIB},
|
||||||
|
{"FILE_TRUNC", unix.FILE_TRUNC},
|
||||||
|
{"FILE_NOFOLLOW", unix.FILE_NOFOLLOW},
|
||||||
|
{"FILE_DELETE", unix.FILE_DELETE},
|
||||||
|
{"FILE_RENAME_TO", unix.FILE_RENAME_TO},
|
||||||
|
{"FILE_RENAME_FROM", unix.FILE_RENAME_FROM},
|
||||||
|
{"UNMOUNTED", unix.UNMOUNTED},
|
||||||
|
{"MOUNTEDOVER", unix.MOUNTEDOVER},
|
||||||
|
{"FILE_EXCEPTION", unix.FILE_EXCEPTION},
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
l []string
|
||||||
|
unknown = mask
|
||||||
|
)
|
||||||
|
for _, n := range names {
|
||||||
|
if mask&n.m == n.m {
|
||||||
|
l = append(l, n.n)
|
||||||
|
unknown ^= n.m
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if unknown > 0 {
|
||||||
|
l = append(l, fmt.Sprintf("0x%x", unknown))
|
||||||
|
}
|
||||||
|
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n",
|
||||||
|
time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name)
|
||||||
|
}
|
||||||
40
vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go
generated
vendored
Normal file
40
vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go
generated
vendored
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/sys/windows"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Debug(name string, mask uint32) {
|
||||||
|
names := []struct {
|
||||||
|
n string
|
||||||
|
m uint32
|
||||||
|
}{
|
||||||
|
{"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED},
|
||||||
|
{"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED},
|
||||||
|
{"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED},
|
||||||
|
{"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME},
|
||||||
|
{"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME},
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
l []string
|
||||||
|
unknown = mask
|
||||||
|
)
|
||||||
|
for _, n := range names {
|
||||||
|
if mask&n.m == n.m {
|
||||||
|
l = append(l, n.n)
|
||||||
|
unknown ^= n.m
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if unknown > 0 {
|
||||||
|
l = append(l, fmt.Sprintf("0x%x", unknown))
|
||||||
|
}
|
||||||
|
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n",
|
||||||
|
time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name))
|
||||||
|
}
|
||||||
31
vendor/github.com/fsnotify/fsnotify/internal/freebsd.go
generated
vendored
Normal file
31
vendor/github.com/fsnotify/fsnotify/internal/freebsd.go
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
//go:build freebsd
|
||||||
|
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrSyscallEACCES = syscall.EACCES
|
||||||
|
ErrUnixEACCES = unix.EACCES
|
||||||
|
)
|
||||||
|
|
||||||
|
var maxfiles uint64
|
||||||
|
|
||||||
|
func SetRlimit() {
|
||||||
|
// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
|
||||||
|
var l syscall.Rlimit
|
||||||
|
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
|
||||||
|
if err == nil && l.Cur != l.Max {
|
||||||
|
l.Cur = l.Max
|
||||||
|
syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
|
||||||
|
}
|
||||||
|
maxfiles = uint64(l.Cur)
|
||||||
|
}
|
||||||
|
|
||||||
|
func Maxfiles() uint64 { return maxfiles }
|
||||||
|
func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) }
|
||||||
|
func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) }
|
||||||
2
vendor/github.com/fsnotify/fsnotify/internal/internal.go
generated
vendored
Normal file
2
vendor/github.com/fsnotify/fsnotify/internal/internal.go
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
// Package internal contains some helpers.
|
||||||
|
package internal
|
||||||
31
vendor/github.com/fsnotify/fsnotify/internal/unix.go
generated
vendored
Normal file
31
vendor/github.com/fsnotify/fsnotify/internal/unix.go
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
//go:build !windows && !darwin && !freebsd && !plan9
|
||||||
|
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrSyscallEACCES = syscall.EACCES
|
||||||
|
ErrUnixEACCES = unix.EACCES
|
||||||
|
)
|
||||||
|
|
||||||
|
var maxfiles uint64
|
||||||
|
|
||||||
|
func SetRlimit() {
|
||||||
|
// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
|
||||||
|
var l syscall.Rlimit
|
||||||
|
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
|
||||||
|
if err == nil && l.Cur != l.Max {
|
||||||
|
l.Cur = l.Max
|
||||||
|
syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
|
||||||
|
}
|
||||||
|
maxfiles = uint64(l.Cur)
|
||||||
|
}
|
||||||
|
|
||||||
|
func Maxfiles() uint64 { return maxfiles }
|
||||||
|
func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) }
|
||||||
|
func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) }
|
||||||
7
vendor/github.com/fsnotify/fsnotify/internal/unix2.go
generated
vendored
Normal file
7
vendor/github.com/fsnotify/fsnotify/internal/unix2.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
//go:build !windows
|
||||||
|
|
||||||
|
package internal
|
||||||
|
|
||||||
|
func HasPrivilegesForSymlink() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
41
vendor/github.com/fsnotify/fsnotify/internal/windows.go
generated
vendored
Normal file
41
vendor/github.com/fsnotify/fsnotify/internal/windows.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
|
||||||
|
"golang.org/x/sys/windows"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Just a dummy.
|
||||||
|
var (
|
||||||
|
ErrSyscallEACCES = errors.New("dummy")
|
||||||
|
ErrUnixEACCES = errors.New("dummy")
|
||||||
|
)
|
||||||
|
|
||||||
|
func SetRlimit() {}
|
||||||
|
func Maxfiles() uint64 { return 1<<64 - 1 }
|
||||||
|
func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") }
|
||||||
|
func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") }
|
||||||
|
|
||||||
|
func HasPrivilegesForSymlink() bool {
|
||||||
|
var sid *windows.SID
|
||||||
|
err := windows.AllocateAndInitializeSid(
|
||||||
|
&windows.SECURITY_NT_AUTHORITY,
|
||||||
|
2,
|
||||||
|
windows.SECURITY_BUILTIN_DOMAIN_RID,
|
||||||
|
windows.DOMAIN_ALIAS_RID_ADMINS,
|
||||||
|
0, 0, 0, 0, 0, 0,
|
||||||
|
&sid)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
defer windows.FreeSid(sid)
|
||||||
|
token := windows.Token(0)
|
||||||
|
member, err := token.IsMember(sid)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return member || token.IsElevated()
|
||||||
|
}
|
||||||
64
vendor/github.com/fsnotify/fsnotify/shared.go
generated
vendored
Normal file
64
vendor/github.com/fsnotify/fsnotify/shared.go
generated
vendored
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import "sync"
|
||||||
|
|
||||||
|
type shared struct {
|
||||||
|
Events chan Event
|
||||||
|
Errors chan error
|
||||||
|
done chan struct{}
|
||||||
|
mu sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func newShared(ev chan Event, errs chan error) *shared {
|
||||||
|
return &shared{
|
||||||
|
Events: ev,
|
||||||
|
Errors: errs,
|
||||||
|
done: make(chan struct{}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns true if the event was sent, or false if watcher is closed.
|
||||||
|
func (w *shared) sendEvent(e Event) bool {
|
||||||
|
if e.Op == 0 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-w.done:
|
||||||
|
return false
|
||||||
|
case w.Events <- e:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns true if the error was sent, or false if watcher is closed.
|
||||||
|
func (w *shared) sendError(err error) bool {
|
||||||
|
if err == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-w.done:
|
||||||
|
return false
|
||||||
|
case w.Errors <- err:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *shared) isClosed() bool {
|
||||||
|
select {
|
||||||
|
case <-w.done:
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mark as closed; returns true if it was already closed.
|
||||||
|
func (w *shared) close() bool {
|
||||||
|
w.mu.Lock()
|
||||||
|
defer w.mu.Unlock()
|
||||||
|
if w.isClosed() {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
close(w.done)
|
||||||
|
return false
|
||||||
|
}
|
||||||
3
vendor/github.com/fsnotify/fsnotify/staticcheck.conf
generated
vendored
Normal file
3
vendor/github.com/fsnotify/fsnotify/staticcheck.conf
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
checks = ['all',
|
||||||
|
'-U1000', # Don't complain about unused functions.
|
||||||
|
]
|
||||||
7
vendor/github.com/fsnotify/fsnotify/system_bsd.go
generated
vendored
Normal file
7
vendor/github.com/fsnotify/fsnotify/system_bsd.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
//go:build freebsd || openbsd || netbsd || dragonfly
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import "golang.org/x/sys/unix"
|
||||||
|
|
||||||
|
const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC
|
||||||
8
vendor/github.com/fsnotify/fsnotify/system_darwin.go
generated
vendored
Normal file
8
vendor/github.com/fsnotify/fsnotify/system_darwin.go
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
//go:build darwin
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import "golang.org/x/sys/unix"
|
||||||
|
|
||||||
|
// note: this constant is not defined on BSD
|
||||||
|
const openMode = unix.O_EVTONLY | unix.O_CLOEXEC
|
||||||
3
vendor/github.com/go-chi/chi/v5/.gitignore
generated
vendored
Normal file
3
vendor/github.com/go-chi/chi/v5/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
.idea
|
||||||
|
*.sw?
|
||||||
|
.vscode
|
||||||
341
vendor/github.com/go-chi/chi/v5/CHANGELOG.md
generated
vendored
Normal file
341
vendor/github.com/go-chi/chi/v5/CHANGELOG.md
generated
vendored
Normal file
@@ -0,0 +1,341 @@
|
|||||||
|
# Changelog
|
||||||
|
|
||||||
|
## v5.0.12 (2024-02-16)
|
||||||
|
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v5.0.11...v5.0.12
|
||||||
|
|
||||||
|
|
||||||
|
## v5.0.11 (2023-12-19)
|
||||||
|
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v5.0.10...v5.0.11
|
||||||
|
|
||||||
|
|
||||||
|
## v5.0.10 (2023-07-13)
|
||||||
|
|
||||||
|
- Fixed small edge case in tests of v5.0.9 for older Go versions
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v5.0.9...v5.0.10
|
||||||
|
|
||||||
|
|
||||||
|
## v5.0.9 (2023-07-13)
|
||||||
|
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v5.0.8...v5.0.9
|
||||||
|
|
||||||
|
|
||||||
|
## v5.0.8 (2022-12-07)
|
||||||
|
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v5.0.7...v5.0.8
|
||||||
|
|
||||||
|
|
||||||
|
## v5.0.7 (2021-11-18)
|
||||||
|
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v5.0.6...v5.0.7
|
||||||
|
|
||||||
|
|
||||||
|
## v5.0.6 (2021-11-15)
|
||||||
|
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v5.0.5...v5.0.6
|
||||||
|
|
||||||
|
|
||||||
|
## v5.0.5 (2021-10-27)
|
||||||
|
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v5.0.4...v5.0.5
|
||||||
|
|
||||||
|
|
||||||
|
## v5.0.4 (2021-08-29)
|
||||||
|
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v5.0.3...v5.0.4
|
||||||
|
|
||||||
|
|
||||||
|
## v5.0.3 (2021-04-29)
|
||||||
|
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v5.0.2...v5.0.3
|
||||||
|
|
||||||
|
|
||||||
|
## v5.0.2 (2021-03-25)
|
||||||
|
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v5.0.1...v5.0.2
|
||||||
|
|
||||||
|
|
||||||
|
## v5.0.1 (2021-03-10)
|
||||||
|
|
||||||
|
- Small improvements
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v5.0.0...v5.0.1
|
||||||
|
|
||||||
|
|
||||||
|
## v5.0.0 (2021-02-27)
|
||||||
|
|
||||||
|
- chi v5, `github.com/go-chi/chi/v5` introduces the adoption of Go's SIV to adhere to the current state-of-the-tools in Go.
|
||||||
|
- chi v1.5.x did not work out as planned, as the Go tooling is too powerful and chi's adoption is too wide.
|
||||||
|
The most responsible thing to do for everyone's benefit is to just release v5 with SIV, so I present to you all,
|
||||||
|
chi v5 at `github.com/go-chi/chi/v5`. I hope someday the developer experience and ergonomics I've been seeking
|
||||||
|
will still come to fruition in some form, see https://github.com/golang/go/issues/44550
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v1.5.4...v5.0.0
|
||||||
|
|
||||||
|
|
||||||
|
## v1.5.4 (2021-02-27)
|
||||||
|
|
||||||
|
- Undo prior retraction in v1.5.3 as we prepare for v5.0.0 release
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v1.5.3...v1.5.4
|
||||||
|
|
||||||
|
|
||||||
|
## v1.5.3 (2021-02-21)
|
||||||
|
|
||||||
|
- Update go.mod to go 1.16 with new retract directive marking all versions without prior go.mod support
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v1.5.2...v1.5.3
|
||||||
|
|
||||||
|
|
||||||
|
## v1.5.2 (2021-02-10)
|
||||||
|
|
||||||
|
- Reverting allocation optimization as a precaution as go test -race fails.
|
||||||
|
- Minor improvements, see history below
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v1.5.1...v1.5.2
|
||||||
|
|
||||||
|
|
||||||
|
## v1.5.1 (2020-12-06)
|
||||||
|
|
||||||
|
- Performance improvement: removing 1 allocation by foregoing context.WithValue, thank you @bouk for
|
||||||
|
your contribution (https://github.com/go-chi/chi/pull/555). Note: new benchmarks posted in README.
|
||||||
|
- `middleware.CleanPath`: new middleware that clean's request path of double slashes
|
||||||
|
- deprecate & remove `chi.ServerBaseContext` in favour of stdlib `http.Server#BaseContext`
|
||||||
|
- plus other tiny improvements, see full commit history below
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v4.1.2...v1.5.1
|
||||||
|
|
||||||
|
|
||||||
|
## v1.5.0 (2020-11-12) - now with go.mod support
|
||||||
|
|
||||||
|
`chi` dates back to 2016 with it's original implementation as one of the first routers to adopt the newly introduced
|
||||||
|
context.Context api to the stdlib -- set out to design a router that is faster, more modular and simpler than anything
|
||||||
|
else out there -- while not introducing any custom handler types or dependencies. Today, `chi` still has zero dependencies,
|
||||||
|
and in many ways is future proofed from changes, given it's minimal nature. Between versions, chi's iterations have been very
|
||||||
|
incremental, with the architecture and api being the same today as it was originally designed in 2016. For this reason it
|
||||||
|
makes chi a pretty easy project to maintain, as well thanks to the many amazing community contributions over the years
|
||||||
|
to who all help make chi better (total of 86 contributors to date -- thanks all!).
|
||||||
|
|
||||||
|
Chi has been a labour of love, art and engineering, with the goals to offer beautiful ergonomics, flexibility, performance
|
||||||
|
and simplicity when building HTTP services with Go. I've strived to keep the router very minimal in surface area / code size,
|
||||||
|
and always improving the code wherever possible -- and as of today the `chi` package is just 1082 lines of code (not counting
|
||||||
|
middlewares, which are all optional). As well, I don't have the exact metrics, but from my analysis and email exchanges from
|
||||||
|
companies and developers, chi is used by thousands of projects around the world -- thank you all as there is no better form of
|
||||||
|
joy for me than to have art I had started be helpful and enjoyed by others. And of course I use chi in all of my own projects too :)
|
||||||
|
|
||||||
|
For me, the aesthetics of chi's code and usage are very important. With the introduction of Go's module support
|
||||||
|
(which I'm a big fan of), chi's past versioning scheme choice to v2, v3 and v4 would mean I'd require the import path
|
||||||
|
of "github.com/go-chi/chi/v4", leading to the lengthy discussion at https://github.com/go-chi/chi/issues/462.
|
||||||
|
Haha, to some, you may be scratching your head why I've spent > 1 year stalling to adopt "/vXX" convention in the import
|
||||||
|
path -- which isn't horrible in general -- but for chi, I'm unable to accept it as I strive for perfection in it's API design,
|
||||||
|
aesthetics and simplicity. It just doesn't feel good to me given chi's simple nature -- I do not foresee a "v5" or "v6",
|
||||||
|
and upgrading between versions in the future will also be just incremental.
|
||||||
|
|
||||||
|
I do understand versioning is a part of the API design as well, which is why the solution for a while has been to "do nothing",
|
||||||
|
as Go supports both old and new import paths with/out go.mod. However, now that Go module support has had time to iron out kinks and
|
||||||
|
is adopted everywhere, it's time for chi to get with the times. Luckily, I've discovered a path forward that will make me happy,
|
||||||
|
while also not breaking anyone's app who adopted a prior versioning from tags in v2/v3/v4. I've made an experimental release of
|
||||||
|
v1.5.0 with go.mod silently, and tested it with new and old projects, to ensure the developer experience is preserved, and it's
|
||||||
|
largely unnoticed. Fortunately, Go's toolchain will check the tags of a repo and consider the "latest" tag the one with go.mod.
|
||||||
|
However, you can still request a specific older tag such as v4.1.2, and everything will "just work". But new users can just
|
||||||
|
`go get github.com/go-chi/chi` or `go get github.com/go-chi/chi@latest` and they will get the latest version which contains
|
||||||
|
go.mod support, which is v1.5.0+. `chi` will not change very much over the years, just like it hasn't changed much from 4 years ago.
|
||||||
|
Therefore, we will stay on v1.x from here on, starting from v1.5.0. Any breaking changes will bump a "minor" release and
|
||||||
|
backwards-compatible improvements/fixes will bump a "tiny" release.
|
||||||
|
|
||||||
|
For existing projects who want to upgrade to the latest go.mod version, run: `go get -u github.com/go-chi/chi@v1.5.0`,
|
||||||
|
which will get you on the go.mod version line (as Go's mod cache may still remember v4.x). Brand new systems can run
|
||||||
|
`go get -u github.com/go-chi/chi` or `go get -u github.com/go-chi/chi@latest` to install chi, which will install v1.5.0+
|
||||||
|
built with go.mod support.
|
||||||
|
|
||||||
|
My apologies to the developers who will disagree with the decisions above, but, hope you'll try it and see it's a very
|
||||||
|
minor request which is backwards compatible and won't break your existing installations.
|
||||||
|
|
||||||
|
Cheers all, happy coding!
|
||||||
|
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
|
||||||
|
## v4.1.2 (2020-06-02)
|
||||||
|
|
||||||
|
- fix that handles MethodNotAllowed with path variables, thank you @caseyhadden for your contribution
|
||||||
|
- fix to replace nested wildcards correctly in RoutePattern, thank you @@unmultimedio for your contribution
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v4.1.1...v4.1.2
|
||||||
|
|
||||||
|
|
||||||
|
## v4.1.1 (2020-04-16)
|
||||||
|
|
||||||
|
- fix for issue https://github.com/go-chi/chi/issues/411 which allows for overlapping regexp
|
||||||
|
route to the correct handler through a recursive tree search, thanks to @Jahaja for the PR/fix!
|
||||||
|
- new middleware.RouteHeaders as a simple router for request headers with wildcard support
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v4.1.0...v4.1.1
|
||||||
|
|
||||||
|
|
||||||
|
## v4.1.0 (2020-04-1)
|
||||||
|
|
||||||
|
- middleware.LogEntry: Write method on interface now passes the response header
|
||||||
|
and an extra interface type useful for custom logger implementations.
|
||||||
|
- middleware.WrapResponseWriter: minor fix
|
||||||
|
- middleware.Recoverer: a bit prettier
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v4.0.4...v4.1.0
|
||||||
|
|
||||||
|
## v4.0.4 (2020-03-24)
|
||||||
|
|
||||||
|
- middleware.Recoverer: new pretty stack trace printing (https://github.com/go-chi/chi/pull/496)
|
||||||
|
- a few minor improvements and fixes
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v4.0.3...v4.0.4
|
||||||
|
|
||||||
|
|
||||||
|
## v4.0.3 (2020-01-09)
|
||||||
|
|
||||||
|
- core: fix regexp routing to include default value when param is not matched
|
||||||
|
- middleware: rewrite of middleware.Compress
|
||||||
|
- middleware: suppress http.ErrAbortHandler in middleware.Recoverer
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v4.0.2...v4.0.3
|
||||||
|
|
||||||
|
|
||||||
|
## v4.0.2 (2019-02-26)
|
||||||
|
|
||||||
|
- Minor fixes
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v4.0.1...v4.0.2
|
||||||
|
|
||||||
|
|
||||||
|
## v4.0.1 (2019-01-21)
|
||||||
|
|
||||||
|
- Fixes issue with compress middleware: #382 #385
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v4.0.0...v4.0.1
|
||||||
|
|
||||||
|
|
||||||
|
## v4.0.0 (2019-01-10)
|
||||||
|
|
||||||
|
- chi v4 requires Go 1.10.3+ (or Go 1.9.7+) - we have deprecated support for Go 1.7 and 1.8
|
||||||
|
- router: respond with 404 on router with no routes (#362)
|
||||||
|
- router: additional check to ensure wildcard is at the end of a url pattern (#333)
|
||||||
|
- middleware: deprecate use of http.CloseNotifier (#347)
|
||||||
|
- middleware: fix RedirectSlashes to include query params on redirect (#334)
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v3.3.4...v4.0.0
|
||||||
|
|
||||||
|
|
||||||
|
## v3.3.4 (2019-01-07)
|
||||||
|
|
||||||
|
- Minor middleware improvements. No changes to core library/router. Moving v3 into its
|
||||||
|
- own branch as a version of chi for Go 1.7, 1.8, 1.9, 1.10, 1.11
|
||||||
|
- History of changes: see https://github.com/go-chi/chi/compare/v3.3.3...v3.3.4
|
||||||
|
|
||||||
|
|
||||||
|
## v3.3.3 (2018-08-27)
|
||||||
|
|
||||||
|
- Minor release
|
||||||
|
- See https://github.com/go-chi/chi/compare/v3.3.2...v3.3.3
|
||||||
|
|
||||||
|
|
||||||
|
## v3.3.2 (2017-12-22)
|
||||||
|
|
||||||
|
- Support to route trailing slashes on mounted sub-routers (#281)
|
||||||
|
- middleware: new `ContentCharset` to check matching charsets. Thank you
|
||||||
|
@csucu for your community contribution!
|
||||||
|
|
||||||
|
|
||||||
|
## v3.3.1 (2017-11-20)
|
||||||
|
|
||||||
|
- middleware: new `AllowContentType` handler for explicit whitelist of accepted request Content-Types
|
||||||
|
- middleware: new `SetHeader` handler for short-hand middleware to set a response header key/value
|
||||||
|
- Minor bug fixes
|
||||||
|
|
||||||
|
|
||||||
|
## v3.3.0 (2017-10-10)
|
||||||
|
|
||||||
|
- New chi.RegisterMethod(method) to add support for custom HTTP methods, see _examples/custom-method for usage
|
||||||
|
- Deprecated LINK and UNLINK methods from the default list, please use `chi.RegisterMethod("LINK")` and `chi.RegisterMethod("UNLINK")` in an `init()` function
|
||||||
|
|
||||||
|
|
||||||
|
## v3.2.1 (2017-08-31)
|
||||||
|
|
||||||
|
- Add new `Match(rctx *Context, method, path string) bool` method to `Routes` interface
|
||||||
|
and `Mux`. Match searches the mux's routing tree for a handler that matches the method/path
|
||||||
|
- Add new `RouteMethod` to `*Context`
|
||||||
|
- Add new `Routes` pointer to `*Context`
|
||||||
|
- Add new `middleware.GetHead` to route missing HEAD requests to GET handler
|
||||||
|
- Updated benchmarks (see README)
|
||||||
|
|
||||||
|
|
||||||
|
## v3.1.5 (2017-08-02)
|
||||||
|
|
||||||
|
- Setup golint and go vet for the project
|
||||||
|
- As per golint, we've redefined `func ServerBaseContext(h http.Handler, baseCtx context.Context) http.Handler`
|
||||||
|
to `func ServerBaseContext(baseCtx context.Context, h http.Handler) http.Handler`
|
||||||
|
|
||||||
|
|
||||||
|
## v3.1.0 (2017-07-10)
|
||||||
|
|
||||||
|
- Fix a few minor issues after v3 release
|
||||||
|
- Move `docgen` sub-pkg to https://github.com/go-chi/docgen
|
||||||
|
- Move `render` sub-pkg to https://github.com/go-chi/render
|
||||||
|
- Add new `URLFormat` handler to chi/middleware sub-pkg to make working with url mime
|
||||||
|
suffixes easier, ie. parsing `/articles/1.json` and `/articles/1.xml`. See comments in
|
||||||
|
https://github.com/go-chi/chi/blob/master/middleware/url_format.go for example usage.
|
||||||
|
|
||||||
|
|
||||||
|
## v3.0.0 (2017-06-21)
|
||||||
|
|
||||||
|
- Major update to chi library with many exciting updates, but also some *breaking changes*
|
||||||
|
- URL parameter syntax changed from `/:id` to `/{id}` for even more flexible routing, such as
|
||||||
|
`/articles/{month}-{day}-{year}-{slug}`, `/articles/{id}`, and `/articles/{id}.{ext}` on the
|
||||||
|
same router
|
||||||
|
- Support for regexp for routing patterns, in the form of `/{paramKey:regExp}` for example:
|
||||||
|
`r.Get("/articles/{name:[a-z]+}", h)` and `chi.URLParam(r, "name")`
|
||||||
|
- Add `Method` and `MethodFunc` to `chi.Router` to allow routing definitions such as
|
||||||
|
`r.Method("GET", "/", h)` which provides a cleaner interface for custom handlers like
|
||||||
|
in `_examples/custom-handler`
|
||||||
|
- Deprecating `mux#FileServer` helper function. Instead, we encourage users to create their
|
||||||
|
own using file handler with the stdlib, see `_examples/fileserver` for an example
|
||||||
|
- Add support for LINK/UNLINK http methods via `r.Method()` and `r.MethodFunc()`
|
||||||
|
- Moved the chi project to its own organization, to allow chi-related community packages to
|
||||||
|
be easily discovered and supported, at: https://github.com/go-chi
|
||||||
|
- *NOTE:* please update your import paths to `"github.com/go-chi/chi"`
|
||||||
|
- *NOTE:* chi v2 is still available at https://github.com/go-chi/chi/tree/v2
|
||||||
|
|
||||||
|
|
||||||
|
## v2.1.0 (2017-03-30)
|
||||||
|
|
||||||
|
- Minor improvements and update to the chi core library
|
||||||
|
- Introduced a brand new `chi/render` sub-package to complete the story of building
|
||||||
|
APIs to offer a pattern for managing well-defined request / response payloads. Please
|
||||||
|
check out the updated `_examples/rest` example for how it works.
|
||||||
|
- Added `MethodNotAllowed(h http.HandlerFunc)` to chi.Router interface
|
||||||
|
|
||||||
|
|
||||||
|
## v2.0.0 (2017-01-06)
|
||||||
|
|
||||||
|
- After many months of v2 being in an RC state with many companies and users running it in
|
||||||
|
production, the inclusion of some improvements to the middlewares, we are very pleased to
|
||||||
|
announce v2.0.0 of chi.
|
||||||
|
|
||||||
|
|
||||||
|
## v2.0.0-rc1 (2016-07-26)
|
||||||
|
|
||||||
|
- Huge update! chi v2 is a large refactor targeting Go 1.7+. As of Go 1.7, the popular
|
||||||
|
community `"net/context"` package has been included in the standard library as `"context"` and
|
||||||
|
utilized by `"net/http"` and `http.Request` to managing deadlines, cancelation signals and other
|
||||||
|
request-scoped values. We're very excited about the new context addition and are proud to
|
||||||
|
introduce chi v2, a minimal and powerful routing package for building large HTTP services,
|
||||||
|
with zero external dependencies. Chi focuses on idiomatic design and encourages the use of
|
||||||
|
stdlib HTTP handlers and middlewares.
|
||||||
|
- chi v2 deprecates its `chi.Handler` interface and requires `http.Handler` or `http.HandlerFunc`
|
||||||
|
- chi v2 stores URL routing parameters and patterns in the standard request context: `r.Context()`
|
||||||
|
- chi v2 lower-level routing context is accessible by `chi.RouteContext(r.Context()) *chi.Context`,
|
||||||
|
which provides direct access to URL routing parameters, the routing path and the matching
|
||||||
|
routing patterns.
|
||||||
|
- Users upgrading from chi v1 to v2, need to:
|
||||||
|
1. Update the old chi.Handler signature, `func(ctx context.Context, w http.ResponseWriter, r *http.Request)` to
|
||||||
|
the standard http.Handler: `func(w http.ResponseWriter, r *http.Request)`
|
||||||
|
2. Use `chi.URLParam(r *http.Request, paramKey string) string`
|
||||||
|
or `URLParamFromCtx(ctx context.Context, paramKey string) string` to access a url parameter value
|
||||||
|
|
||||||
|
|
||||||
|
## v1.0.0 (2016-07-01)
|
||||||
|
|
||||||
|
- Released chi v1 stable https://github.com/go-chi/chi/tree/v1.0.0 for Go 1.6 and older.
|
||||||
|
|
||||||
|
|
||||||
|
## v0.9.0 (2016-03-31)
|
||||||
|
|
||||||
|
- Reuse context objects via sync.Pool for zero-allocation routing [#33](https://github.com/go-chi/chi/pull/33)
|
||||||
|
- BREAKING NOTE: due to subtle API changes, previously `chi.URLParams(ctx)["id"]` used to access url parameters
|
||||||
|
has changed to: `chi.URLParam(ctx, "id")`
|
||||||
31
vendor/github.com/go-chi/chi/v5/CONTRIBUTING.md
generated
vendored
Normal file
31
vendor/github.com/go-chi/chi/v5/CONTRIBUTING.md
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
# Contributing
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
1. [Install Go][go-install].
|
||||||
|
2. Download the sources and switch the working directory:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
go get -u -d github.com/go-chi/chi
|
||||||
|
cd $GOPATH/src/github.com/go-chi/chi
|
||||||
|
```
|
||||||
|
|
||||||
|
## Submitting a Pull Request
|
||||||
|
|
||||||
|
A typical workflow is:
|
||||||
|
|
||||||
|
1. [Fork the repository.][fork]
|
||||||
|
2. [Create a topic branch.][branch]
|
||||||
|
3. Add tests for your change.
|
||||||
|
4. Run `go test`. If your tests pass, return to the step 3.
|
||||||
|
5. Implement the change and ensure the steps from the previous step pass.
|
||||||
|
6. Run `goimports -w .`, to ensure the new code conforms to Go formatting guideline.
|
||||||
|
7. [Add, commit and push your changes.][git-help]
|
||||||
|
8. [Submit a pull request.][pull-req]
|
||||||
|
|
||||||
|
[go-install]: https://golang.org/doc/install
|
||||||
|
[fork]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo
|
||||||
|
[branch]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-branches
|
||||||
|
[git-help]: https://docs.github.com/en
|
||||||
|
[pull-req]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-pull-requests
|
||||||
|
|
||||||
20
vendor/github.com/go-chi/chi/v5/LICENSE
generated
vendored
Normal file
20
vendor/github.com/go-chi/chi/v5/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
Copyright (c) 2015-present Peter Kieltyka (https://github.com/pkieltyka), Google Inc.
|
||||||
|
|
||||||
|
MIT License
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||||
|
this software and associated documentation files (the "Software"), to deal in
|
||||||
|
the Software without restriction, including without limitation the rights to
|
||||||
|
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||||
|
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||||
|
subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||||
|
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||||
|
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||||
|
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||||
|
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||||
22
vendor/github.com/go-chi/chi/v5/Makefile
generated
vendored
Normal file
22
vendor/github.com/go-chi/chi/v5/Makefile
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
.PHONY: all
|
||||||
|
all:
|
||||||
|
@echo "**********************************************************"
|
||||||
|
@echo "** chi build tool **"
|
||||||
|
@echo "**********************************************************"
|
||||||
|
|
||||||
|
|
||||||
|
.PHONY: test
|
||||||
|
test:
|
||||||
|
go clean -testcache && $(MAKE) test-router && $(MAKE) test-middleware
|
||||||
|
|
||||||
|
.PHONY: test-router
|
||||||
|
test-router:
|
||||||
|
go test -race -v .
|
||||||
|
|
||||||
|
.PHONY: test-middleware
|
||||||
|
test-middleware:
|
||||||
|
go test -race -v ./middleware
|
||||||
|
|
||||||
|
.PHONY: docs
|
||||||
|
docs:
|
||||||
|
npx docsify-cli serve ./docs
|
||||||
505
vendor/github.com/go-chi/chi/v5/README.md
generated
vendored
Normal file
505
vendor/github.com/go-chi/chi/v5/README.md
generated
vendored
Normal file
@@ -0,0 +1,505 @@
|
|||||||
|
# <img alt="chi" src="https://cdn.rawgit.com/go-chi/chi/master/_examples/chi.svg" width="220" />
|
||||||
|
|
||||||
|
|
||||||
|
[![GoDoc Widget]][GoDoc]
|
||||||
|
|
||||||
|
`chi` is a lightweight, idiomatic and composable router for building Go HTTP services. It's
|
||||||
|
especially good at helping you write large REST API services that are kept maintainable as your
|
||||||
|
project grows and changes. `chi` is built on the new `context` package introduced in Go 1.7 to
|
||||||
|
handle signaling, cancelation and request-scoped values across a handler chain.
|
||||||
|
|
||||||
|
The focus of the project has been to seek out an elegant and comfortable design for writing
|
||||||
|
REST API servers, written during the development of the Pressly API service that powers our
|
||||||
|
public API service, which in turn powers all of our client-side applications.
|
||||||
|
|
||||||
|
The key considerations of chi's design are: project structure, maintainability, standard http
|
||||||
|
handlers (stdlib-only), developer productivity, and deconstructing a large system into many small
|
||||||
|
parts. The core router `github.com/go-chi/chi` is quite small (less than 1000 LOC), but we've also
|
||||||
|
included some useful/optional subpackages: [middleware](/middleware), [render](https://github.com/go-chi/render)
|
||||||
|
and [docgen](https://github.com/go-chi/docgen). We hope you enjoy it too!
|
||||||
|
|
||||||
|
## Install
|
||||||
|
|
||||||
|
```sh
|
||||||
|
go get -u github.com/go-chi/chi/v5
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
* **Lightweight** - cloc'd in ~1000 LOC for the chi router
|
||||||
|
* **Fast** - yes, see [benchmarks](#benchmarks)
|
||||||
|
* **100% compatible with net/http** - use any http or middleware pkg in the ecosystem that is also compatible with `net/http`
|
||||||
|
* **Designed for modular/composable APIs** - middlewares, inline middlewares, route groups and sub-router mounting
|
||||||
|
* **Context control** - built on new `context` package, providing value chaining, cancellations and timeouts
|
||||||
|
* **Robust** - in production at Pressly, Cloudflare, Heroku, 99Designs, and many others (see [discussion](https://github.com/go-chi/chi/issues/91))
|
||||||
|
* **Doc generation** - `docgen` auto-generates routing documentation from your source to JSON or Markdown
|
||||||
|
* **Go.mod support** - as of v5, go.mod support (see [CHANGELOG](https://github.com/go-chi/chi/blob/master/CHANGELOG.md))
|
||||||
|
* **No external dependencies** - plain ol' Go stdlib + net/http
|
||||||
|
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
See [_examples/](https://github.com/go-chi/chi/blob/master/_examples/) for a variety of examples.
|
||||||
|
|
||||||
|
|
||||||
|
**As easy as:**
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"github.com/go-chi/chi/v5"
|
||||||
|
"github.com/go-chi/chi/v5/middleware"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
r := chi.NewRouter()
|
||||||
|
r.Use(middleware.Logger)
|
||||||
|
r.Get("/", func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
w.Write([]byte("welcome"))
|
||||||
|
})
|
||||||
|
http.ListenAndServe(":3000", r)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**REST Preview:**
|
||||||
|
|
||||||
|
Here is a little preview of what routing looks like with chi. Also take a look at the generated routing docs
|
||||||
|
in JSON ([routes.json](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.json)) and in
|
||||||
|
Markdown ([routes.md](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.md)).
|
||||||
|
|
||||||
|
I highly recommend reading the source of the [examples](https://github.com/go-chi/chi/blob/master/_examples/) listed
|
||||||
|
above, they will show you all the features of chi and serve as a good form of documentation.
|
||||||
|
|
||||||
|
```go
|
||||||
|
import (
|
||||||
|
//...
|
||||||
|
"context"
|
||||||
|
"github.com/go-chi/chi/v5"
|
||||||
|
"github.com/go-chi/chi/v5/middleware"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
r := chi.NewRouter()
|
||||||
|
|
||||||
|
// A good base middleware stack
|
||||||
|
r.Use(middleware.RequestID)
|
||||||
|
r.Use(middleware.RealIP)
|
||||||
|
r.Use(middleware.Logger)
|
||||||
|
r.Use(middleware.Recoverer)
|
||||||
|
|
||||||
|
// Set a timeout value on the request context (ctx), that will signal
|
||||||
|
// through ctx.Done() that the request has timed out and further
|
||||||
|
// processing should be stopped.
|
||||||
|
r.Use(middleware.Timeout(60 * time.Second))
|
||||||
|
|
||||||
|
r.Get("/", func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
w.Write([]byte("hi"))
|
||||||
|
})
|
||||||
|
|
||||||
|
// RESTy routes for "articles" resource
|
||||||
|
r.Route("/articles", func(r chi.Router) {
|
||||||
|
r.With(paginate).Get("/", listArticles) // GET /articles
|
||||||
|
r.With(paginate).Get("/{month}-{day}-{year}", listArticlesByDate) // GET /articles/01-16-2017
|
||||||
|
|
||||||
|
r.Post("/", createArticle) // POST /articles
|
||||||
|
r.Get("/search", searchArticles) // GET /articles/search
|
||||||
|
|
||||||
|
// Regexp url parameters:
|
||||||
|
r.Get("/{articleSlug:[a-z-]+}", getArticleBySlug) // GET /articles/home-is-toronto
|
||||||
|
|
||||||
|
// Subrouters:
|
||||||
|
r.Route("/{articleID}", func(r chi.Router) {
|
||||||
|
r.Use(ArticleCtx)
|
||||||
|
r.Get("/", getArticle) // GET /articles/123
|
||||||
|
r.Put("/", updateArticle) // PUT /articles/123
|
||||||
|
r.Delete("/", deleteArticle) // DELETE /articles/123
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
// Mount the admin sub-router
|
||||||
|
r.Mount("/admin", adminRouter())
|
||||||
|
|
||||||
|
http.ListenAndServe(":3333", r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func ArticleCtx(next http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
articleID := chi.URLParam(r, "articleID")
|
||||||
|
article, err := dbGetArticle(articleID)
|
||||||
|
if err != nil {
|
||||||
|
http.Error(w, http.StatusText(404), 404)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ctx := context.WithValue(r.Context(), "article", article)
|
||||||
|
next.ServeHTTP(w, r.WithContext(ctx))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func getArticle(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := r.Context()
|
||||||
|
article, ok := ctx.Value("article").(*Article)
|
||||||
|
if !ok {
|
||||||
|
http.Error(w, http.StatusText(422), 422)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
w.Write([]byte(fmt.Sprintf("title:%s", article.Title)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// A completely separate router for administrator routes
|
||||||
|
func adminRouter() http.Handler {
|
||||||
|
r := chi.NewRouter()
|
||||||
|
r.Use(AdminOnly)
|
||||||
|
r.Get("/", adminIndex)
|
||||||
|
r.Get("/accounts", adminListAccounts)
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
func AdminOnly(next http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := r.Context()
|
||||||
|
perm, ok := ctx.Value("acl.permission").(YourPermissionType)
|
||||||
|
if !ok || !perm.IsAdmin() {
|
||||||
|
http.Error(w, http.StatusText(403), 403)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
next.ServeHTTP(w, r)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Router interface
|
||||||
|
|
||||||
|
chi's router is based on a kind of [Patricia Radix trie](https://en.wikipedia.org/wiki/Radix_tree).
|
||||||
|
The router is fully compatible with `net/http`.
|
||||||
|
|
||||||
|
Built on top of the tree is the `Router` interface:
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Router consisting of the core routing methods used by chi's Mux,
|
||||||
|
// using only the standard net/http.
|
||||||
|
type Router interface {
|
||||||
|
http.Handler
|
||||||
|
Routes
|
||||||
|
|
||||||
|
// Use appends one or more middlewares onto the Router stack.
|
||||||
|
Use(middlewares ...func(http.Handler) http.Handler)
|
||||||
|
|
||||||
|
// With adds inline middlewares for an endpoint handler.
|
||||||
|
With(middlewares ...func(http.Handler) http.Handler) Router
|
||||||
|
|
||||||
|
// Group adds a new inline-Router along the current routing
|
||||||
|
// path, with a fresh middleware stack for the inline-Router.
|
||||||
|
Group(fn func(r Router)) Router
|
||||||
|
|
||||||
|
// Route mounts a sub-Router along a `pattern` string.
|
||||||
|
Route(pattern string, fn func(r Router)) Router
|
||||||
|
|
||||||
|
// Mount attaches another http.Handler along ./pattern/*
|
||||||
|
Mount(pattern string, h http.Handler)
|
||||||
|
|
||||||
|
// Handle and HandleFunc adds routes for `pattern` that matches
|
||||||
|
// all HTTP methods.
|
||||||
|
Handle(pattern string, h http.Handler)
|
||||||
|
HandleFunc(pattern string, h http.HandlerFunc)
|
||||||
|
|
||||||
|
// Method and MethodFunc adds routes for `pattern` that matches
|
||||||
|
// the `method` HTTP method.
|
||||||
|
Method(method, pattern string, h http.Handler)
|
||||||
|
MethodFunc(method, pattern string, h http.HandlerFunc)
|
||||||
|
|
||||||
|
// HTTP-method routing along `pattern`
|
||||||
|
Connect(pattern string, h http.HandlerFunc)
|
||||||
|
Delete(pattern string, h http.HandlerFunc)
|
||||||
|
Get(pattern string, h http.HandlerFunc)
|
||||||
|
Head(pattern string, h http.HandlerFunc)
|
||||||
|
Options(pattern string, h http.HandlerFunc)
|
||||||
|
Patch(pattern string, h http.HandlerFunc)
|
||||||
|
Post(pattern string, h http.HandlerFunc)
|
||||||
|
Put(pattern string, h http.HandlerFunc)
|
||||||
|
Trace(pattern string, h http.HandlerFunc)
|
||||||
|
|
||||||
|
// NotFound defines a handler to respond whenever a route could
|
||||||
|
// not be found.
|
||||||
|
NotFound(h http.HandlerFunc)
|
||||||
|
|
||||||
|
// MethodNotAllowed defines a handler to respond whenever a method is
|
||||||
|
// not allowed.
|
||||||
|
MethodNotAllowed(h http.HandlerFunc)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Routes interface adds two methods for router traversal, which is also
|
||||||
|
// used by the github.com/go-chi/docgen package to generate documentation for Routers.
|
||||||
|
type Routes interface {
|
||||||
|
// Routes returns the routing tree in an easily traversable structure.
|
||||||
|
Routes() []Route
|
||||||
|
|
||||||
|
// Middlewares returns the list of middlewares in use by the router.
|
||||||
|
Middlewares() Middlewares
|
||||||
|
|
||||||
|
// Match searches the routing tree for a handler that matches
|
||||||
|
// the method/path - similar to routing a http request, but without
|
||||||
|
// executing the handler thereafter.
|
||||||
|
Match(rctx *Context, method, path string) bool
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Each routing method accepts a URL `pattern` and chain of `handlers`. The URL pattern
|
||||||
|
supports named params (ie. `/users/{userID}`) and wildcards (ie. `/admin/*`). URL parameters
|
||||||
|
can be fetched at runtime by calling `chi.URLParam(r, "userID")` for named parameters
|
||||||
|
and `chi.URLParam(r, "*")` for a wildcard parameter.
|
||||||
|
|
||||||
|
|
||||||
|
### Middleware handlers
|
||||||
|
|
||||||
|
chi's middlewares are just stdlib net/http middleware handlers. There is nothing special
|
||||||
|
about them, which means the router and all the tooling is designed to be compatible and
|
||||||
|
friendly with any middleware in the community. This offers much better extensibility and reuse
|
||||||
|
of packages and is at the heart of chi's purpose.
|
||||||
|
|
||||||
|
Here is an example of a standard net/http middleware where we assign a context key `"user"`
|
||||||
|
the value of `"123"`. This middleware sets a hypothetical user identifier on the request
|
||||||
|
context and calls the next handler in the chain.
|
||||||
|
|
||||||
|
```go
|
||||||
|
// HTTP middleware setting a value on the request context
|
||||||
|
func MyMiddleware(next http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
// create new context from `r` request context, and assign key `"user"`
|
||||||
|
// to value of `"123"`
|
||||||
|
ctx := context.WithValue(r.Context(), "user", "123")
|
||||||
|
|
||||||
|
// call the next handler in the chain, passing the response writer and
|
||||||
|
// the updated request object with the new context value.
|
||||||
|
//
|
||||||
|
// note: context.Context values are nested, so any previously set
|
||||||
|
// values will be accessible as well, and the new `"user"` key
|
||||||
|
// will be accessible from this point forward.
|
||||||
|
next.ServeHTTP(w, r.WithContext(ctx))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
### Request handlers
|
||||||
|
|
||||||
|
chi uses standard net/http request handlers. This little snippet is an example of a http.Handler
|
||||||
|
func that reads a user identifier from the request context - hypothetically, identifying
|
||||||
|
the user sending an authenticated request, validated+set by a previous middleware handler.
|
||||||
|
|
||||||
|
```go
|
||||||
|
// HTTP handler accessing data from the request context.
|
||||||
|
func MyRequestHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
// here we read from the request context and fetch out `"user"` key set in
|
||||||
|
// the MyMiddleware example above.
|
||||||
|
user := r.Context().Value("user").(string)
|
||||||
|
|
||||||
|
// respond to the client
|
||||||
|
w.Write([]byte(fmt.Sprintf("hi %s", user)))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
### URL parameters
|
||||||
|
|
||||||
|
chi's router parses and stores URL parameters right onto the request context. Here is
|
||||||
|
an example of how to access URL params in your net/http handlers. And of course, middlewares
|
||||||
|
are able to access the same information.
|
||||||
|
|
||||||
|
```go
|
||||||
|
// HTTP handler accessing the url routing parameters.
|
||||||
|
func MyRequestHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
// fetch the url parameter `"userID"` from the request of a matching
|
||||||
|
// routing pattern. An example routing pattern could be: /users/{userID}
|
||||||
|
userID := chi.URLParam(r, "userID")
|
||||||
|
|
||||||
|
// fetch `"key"` from the request context
|
||||||
|
ctx := r.Context()
|
||||||
|
key := ctx.Value("key").(string)
|
||||||
|
|
||||||
|
// respond to the client
|
||||||
|
w.Write([]byte(fmt.Sprintf("hi %v, %v", userID, key)))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Middlewares
|
||||||
|
|
||||||
|
chi comes equipped with an optional `middleware` package, providing a suite of standard
|
||||||
|
`net/http` middlewares. Please note, any middleware in the ecosystem that is also compatible
|
||||||
|
with `net/http` can be used with chi's mux.
|
||||||
|
|
||||||
|
### Core middlewares
|
||||||
|
|
||||||
|
----------------------------------------------------------------------------------------------------
|
||||||
|
| chi/middleware Handler | description |
|
||||||
|
| :--------------------- | :---------------------------------------------------------------------- |
|
||||||
|
| [AllowContentEncoding] | Enforces a whitelist of request Content-Encoding headers |
|
||||||
|
| [AllowContentType] | Explicit whitelist of accepted request Content-Types |
|
||||||
|
| [BasicAuth] | Basic HTTP authentication |
|
||||||
|
| [Compress] | Gzip compression for clients that accept compressed responses |
|
||||||
|
| [ContentCharset] | Ensure charset for Content-Type request headers |
|
||||||
|
| [CleanPath] | Clean double slashes from request path |
|
||||||
|
| [GetHead] | Automatically route undefined HEAD requests to GET handlers |
|
||||||
|
| [Heartbeat] | Monitoring endpoint to check the servers pulse |
|
||||||
|
| [Logger] | Logs the start and end of each request with the elapsed processing time |
|
||||||
|
| [NoCache] | Sets response headers to prevent clients from caching |
|
||||||
|
| [Profiler] | Easily attach net/http/pprof to your routers |
|
||||||
|
| [RealIP] | Sets a http.Request's RemoteAddr to either X-Real-IP or X-Forwarded-For |
|
||||||
|
| [Recoverer] | Gracefully absorb panics and prints the stack trace |
|
||||||
|
| [RequestID] | Injects a request ID into the context of each request |
|
||||||
|
| [RedirectSlashes] | Redirect slashes on routing paths |
|
||||||
|
| [RouteHeaders] | Route handling for request headers |
|
||||||
|
| [SetHeader] | Short-hand middleware to set a response header key/value |
|
||||||
|
| [StripSlashes] | Strip slashes on routing paths |
|
||||||
|
| [Sunset] | Sunset set Deprecation/Sunset header to response |
|
||||||
|
| [Throttle] | Puts a ceiling on the number of concurrent requests |
|
||||||
|
| [Timeout] | Signals to the request context when the timeout deadline is reached |
|
||||||
|
| [URLFormat] | Parse extension from url and put it on request context |
|
||||||
|
| [WithValue] | Short-hand middleware to set a key/value on the request context |
|
||||||
|
----------------------------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
[AllowContentEncoding]: https://pkg.go.dev/github.com/go-chi/chi/middleware#AllowContentEncoding
|
||||||
|
[AllowContentType]: https://pkg.go.dev/github.com/go-chi/chi/middleware#AllowContentType
|
||||||
|
[BasicAuth]: https://pkg.go.dev/github.com/go-chi/chi/middleware#BasicAuth
|
||||||
|
[Compress]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Compress
|
||||||
|
[ContentCharset]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ContentCharset
|
||||||
|
[CleanPath]: https://pkg.go.dev/github.com/go-chi/chi/middleware#CleanPath
|
||||||
|
[GetHead]: https://pkg.go.dev/github.com/go-chi/chi/middleware#GetHead
|
||||||
|
[GetReqID]: https://pkg.go.dev/github.com/go-chi/chi/middleware#GetReqID
|
||||||
|
[Heartbeat]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Heartbeat
|
||||||
|
[Logger]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Logger
|
||||||
|
[NoCache]: https://pkg.go.dev/github.com/go-chi/chi/middleware#NoCache
|
||||||
|
[Profiler]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Profiler
|
||||||
|
[RealIP]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RealIP
|
||||||
|
[Recoverer]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Recoverer
|
||||||
|
[RedirectSlashes]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RedirectSlashes
|
||||||
|
[RequestLogger]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RequestLogger
|
||||||
|
[RequestID]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RequestID
|
||||||
|
[RouteHeaders]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RouteHeaders
|
||||||
|
[SetHeader]: https://pkg.go.dev/github.com/go-chi/chi/middleware#SetHeader
|
||||||
|
[StripSlashes]: https://pkg.go.dev/github.com/go-chi/chi/middleware#StripSlashes
|
||||||
|
[Sunset]: https://pkg.go.dev/github.com/go-chi/chi/v5/middleware#Sunset
|
||||||
|
[Throttle]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Throttle
|
||||||
|
[ThrottleBacklog]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ThrottleBacklog
|
||||||
|
[ThrottleWithOpts]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ThrottleWithOpts
|
||||||
|
[Timeout]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Timeout
|
||||||
|
[URLFormat]: https://pkg.go.dev/github.com/go-chi/chi/middleware#URLFormat
|
||||||
|
[WithLogEntry]: https://pkg.go.dev/github.com/go-chi/chi/middleware#WithLogEntry
|
||||||
|
[WithValue]: https://pkg.go.dev/github.com/go-chi/chi/middleware#WithValue
|
||||||
|
[Compressor]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Compressor
|
||||||
|
[DefaultLogFormatter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#DefaultLogFormatter
|
||||||
|
[EncoderFunc]: https://pkg.go.dev/github.com/go-chi/chi/middleware#EncoderFunc
|
||||||
|
[HeaderRoute]: https://pkg.go.dev/github.com/go-chi/chi/middleware#HeaderRoute
|
||||||
|
[HeaderRouter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#HeaderRouter
|
||||||
|
[LogEntry]: https://pkg.go.dev/github.com/go-chi/chi/middleware#LogEntry
|
||||||
|
[LogFormatter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#LogFormatter
|
||||||
|
[LoggerInterface]: https://pkg.go.dev/github.com/go-chi/chi/middleware#LoggerInterface
|
||||||
|
[ThrottleOpts]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ThrottleOpts
|
||||||
|
[WrapResponseWriter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#WrapResponseWriter
|
||||||
|
|
||||||
|
### Extra middlewares & packages
|
||||||
|
|
||||||
|
Please see https://github.com/go-chi for additional packages.
|
||||||
|
|
||||||
|
--------------------------------------------------------------------------------------------------------------------
|
||||||
|
| package | description |
|
||||||
|
|:---------------------------------------------------|:-------------------------------------------------------------
|
||||||
|
| [cors](https://github.com/go-chi/cors) | Cross-origin resource sharing (CORS) |
|
||||||
|
| [docgen](https://github.com/go-chi/docgen) | Print chi.Router routes at runtime |
|
||||||
|
| [jwtauth](https://github.com/go-chi/jwtauth) | JWT authentication |
|
||||||
|
| [hostrouter](https://github.com/go-chi/hostrouter) | Domain/host based request routing |
|
||||||
|
| [httplog](https://github.com/go-chi/httplog) | Small but powerful structured HTTP request logging |
|
||||||
|
| [httprate](https://github.com/go-chi/httprate) | HTTP request rate limiter |
|
||||||
|
| [httptracer](https://github.com/go-chi/httptracer) | HTTP request performance tracing library |
|
||||||
|
| [httpvcr](https://github.com/go-chi/httpvcr) | Write deterministic tests for external sources |
|
||||||
|
| [stampede](https://github.com/go-chi/stampede) | HTTP request coalescer |
|
||||||
|
--------------------------------------------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
## context?
|
||||||
|
|
||||||
|
`context` is a tiny pkg that provides simple interface to signal context across call stacks
|
||||||
|
and goroutines. It was originally written by [Sameer Ajmani](https://github.com/Sajmani)
|
||||||
|
and is available in stdlib since go1.7.
|
||||||
|
|
||||||
|
Learn more at https://blog.golang.org/context
|
||||||
|
|
||||||
|
and..
|
||||||
|
* Docs: https://golang.org/pkg/context
|
||||||
|
* Source: https://github.com/golang/go/tree/master/src/context
|
||||||
|
|
||||||
|
|
||||||
|
## Benchmarks
|
||||||
|
|
||||||
|
The benchmark suite: https://github.com/pkieltyka/go-http-routing-benchmark
|
||||||
|
|
||||||
|
Results as of Nov 29, 2020 with Go 1.15.5 on Linux AMD 3950x
|
||||||
|
|
||||||
|
```shell
|
||||||
|
BenchmarkChi_Param 3075895 384 ns/op 400 B/op 2 allocs/op
|
||||||
|
BenchmarkChi_Param5 2116603 566 ns/op 400 B/op 2 allocs/op
|
||||||
|
BenchmarkChi_Param20 964117 1227 ns/op 400 B/op 2 allocs/op
|
||||||
|
BenchmarkChi_ParamWrite 2863413 420 ns/op 400 B/op 2 allocs/op
|
||||||
|
BenchmarkChi_GithubStatic 3045488 395 ns/op 400 B/op 2 allocs/op
|
||||||
|
BenchmarkChi_GithubParam 2204115 540 ns/op 400 B/op 2 allocs/op
|
||||||
|
BenchmarkChi_GithubAll 10000 113811 ns/op 81203 B/op 406 allocs/op
|
||||||
|
BenchmarkChi_GPlusStatic 3337485 359 ns/op 400 B/op 2 allocs/op
|
||||||
|
BenchmarkChi_GPlusParam 2825853 423 ns/op 400 B/op 2 allocs/op
|
||||||
|
BenchmarkChi_GPlus2Params 2471697 483 ns/op 400 B/op 2 allocs/op
|
||||||
|
BenchmarkChi_GPlusAll 194220 5950 ns/op 5200 B/op 26 allocs/op
|
||||||
|
BenchmarkChi_ParseStatic 3365324 356 ns/op 400 B/op 2 allocs/op
|
||||||
|
BenchmarkChi_ParseParam 2976614 404 ns/op 400 B/op 2 allocs/op
|
||||||
|
BenchmarkChi_Parse2Params 2638084 439 ns/op 400 B/op 2 allocs/op
|
||||||
|
BenchmarkChi_ParseAll 109567 11295 ns/op 10400 B/op 52 allocs/op
|
||||||
|
BenchmarkChi_StaticAll 16846 71308 ns/op 62802 B/op 314 allocs/op
|
||||||
|
```
|
||||||
|
|
||||||
|
Comparison with other routers: https://gist.github.com/pkieltyka/123032f12052520aaccab752bd3e78cc
|
||||||
|
|
||||||
|
NOTE: the allocs in the benchmark above are from the calls to http.Request's
|
||||||
|
`WithContext(context.Context)` method that clones the http.Request, sets the `Context()`
|
||||||
|
on the duplicated (alloc'd) request and returns it the new request object. This is just
|
||||||
|
how setting context on a request in Go works.
|
||||||
|
|
||||||
|
|
||||||
|
## Credits
|
||||||
|
|
||||||
|
* Carl Jackson for https://github.com/zenazn/goji
|
||||||
|
* Parts of chi's thinking comes from goji, and chi's middleware package
|
||||||
|
sources from [goji](https://github.com/zenazn/goji/tree/master/web/middleware).
|
||||||
|
* Please see goji's [LICENSE](https://github.com/zenazn/goji/blob/master/LICENSE) (MIT)
|
||||||
|
* Armon Dadgar for https://github.com/armon/go-radix
|
||||||
|
* Contributions: [@VojtechVitek](https://github.com/VojtechVitek)
|
||||||
|
|
||||||
|
We'll be more than happy to see [your contributions](./CONTRIBUTING.md)!
|
||||||
|
|
||||||
|
|
||||||
|
## Beyond REST
|
||||||
|
|
||||||
|
chi is just a http router that lets you decompose request handling into many smaller layers.
|
||||||
|
Many companies use chi to write REST services for their public APIs. But, REST is just a convention
|
||||||
|
for managing state via HTTP, and there's a lot of other pieces required to write a complete client-server
|
||||||
|
system or network of microservices.
|
||||||
|
|
||||||
|
Looking beyond REST, I also recommend some newer works in the field:
|
||||||
|
* [webrpc](https://github.com/webrpc/webrpc) - Web-focused RPC client+server framework with code-gen
|
||||||
|
* [gRPC](https://github.com/grpc/grpc-go) - Google's RPC framework via protobufs
|
||||||
|
* [graphql](https://github.com/99designs/gqlgen) - Declarative query language
|
||||||
|
* [NATS](https://nats.io) - lightweight pub-sub
|
||||||
|
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
Copyright (c) 2015-present [Peter Kieltyka](https://github.com/pkieltyka)
|
||||||
|
|
||||||
|
Licensed under [MIT License](./LICENSE)
|
||||||
|
|
||||||
|
[GoDoc]: https://pkg.go.dev/github.com/go-chi/chi/v5
|
||||||
|
[GoDoc Widget]: https://godoc.org/github.com/go-chi/chi?status.svg
|
||||||
|
[Travis]: https://travis-ci.org/go-chi/chi
|
||||||
|
[Travis Widget]: https://travis-ci.org/go-chi/chi.svg?branch=master
|
||||||
5
vendor/github.com/go-chi/chi/v5/SECURITY.md
generated
vendored
Normal file
5
vendor/github.com/go-chi/chi/v5/SECURITY.md
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
# Reporting Security Issues
|
||||||
|
|
||||||
|
We appreciate your efforts to responsibly disclose your findings, and will make every effort to acknowledge your contributions.
|
||||||
|
|
||||||
|
To report a security issue, please use the GitHub Security Advisory ["Report a Vulnerability"](https://github.com/go-chi/chi/security/advisories/new) tab.
|
||||||
49
vendor/github.com/go-chi/chi/v5/chain.go
generated
vendored
Normal file
49
vendor/github.com/go-chi/chi/v5/chain.go
generated
vendored
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
package chi
|
||||||
|
|
||||||
|
import "net/http"
|
||||||
|
|
||||||
|
// Chain returns a Middlewares type from a slice of middleware handlers.
|
||||||
|
func Chain(middlewares ...func(http.Handler) http.Handler) Middlewares {
|
||||||
|
return Middlewares(middlewares)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handler builds and returns a http.Handler from the chain of middlewares,
|
||||||
|
// with `h http.Handler` as the final handler.
|
||||||
|
func (mws Middlewares) Handler(h http.Handler) http.Handler {
|
||||||
|
return &ChainHandler{h, chain(mws, h), mws}
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandlerFunc builds and returns a http.Handler from the chain of middlewares,
|
||||||
|
// with `h http.Handler` as the final handler.
|
||||||
|
func (mws Middlewares) HandlerFunc(h http.HandlerFunc) http.Handler {
|
||||||
|
return &ChainHandler{h, chain(mws, h), mws}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChainHandler is a http.Handler with support for handler composition and
|
||||||
|
// execution.
|
||||||
|
type ChainHandler struct {
|
||||||
|
Endpoint http.Handler
|
||||||
|
chain http.Handler
|
||||||
|
Middlewares Middlewares
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *ChainHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||||
|
c.chain.ServeHTTP(w, r)
|
||||||
|
}
|
||||||
|
|
||||||
|
// chain builds a http.Handler composed of an inline middleware stack and endpoint
|
||||||
|
// handler in the order they are passed.
|
||||||
|
func chain(middlewares []func(http.Handler) http.Handler, endpoint http.Handler) http.Handler {
|
||||||
|
// Return ahead of time if there aren't any middlewares for the chain
|
||||||
|
if len(middlewares) == 0 {
|
||||||
|
return endpoint
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wrap the end handler with the middleware chain
|
||||||
|
h := middlewares[len(middlewares)-1](endpoint)
|
||||||
|
for i := len(middlewares) - 2; i >= 0; i-- {
|
||||||
|
h = middlewares[i](h)
|
||||||
|
}
|
||||||
|
|
||||||
|
return h
|
||||||
|
}
|
||||||
137
vendor/github.com/go-chi/chi/v5/chi.go
generated
vendored
Normal file
137
vendor/github.com/go-chi/chi/v5/chi.go
generated
vendored
Normal file
@@ -0,0 +1,137 @@
|
|||||||
|
// Package chi is a small, idiomatic and composable router for building HTTP services.
|
||||||
|
//
|
||||||
|
// chi supports the four most recent major versions of Go.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// package main
|
||||||
|
//
|
||||||
|
// import (
|
||||||
|
// "net/http"
|
||||||
|
//
|
||||||
|
// "github.com/go-chi/chi/v5"
|
||||||
|
// "github.com/go-chi/chi/v5/middleware"
|
||||||
|
// )
|
||||||
|
//
|
||||||
|
// func main() {
|
||||||
|
// r := chi.NewRouter()
|
||||||
|
// r.Use(middleware.Logger)
|
||||||
|
// r.Use(middleware.Recoverer)
|
||||||
|
//
|
||||||
|
// r.Get("/", func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
// w.Write([]byte("root."))
|
||||||
|
// })
|
||||||
|
//
|
||||||
|
// http.ListenAndServe(":3333", r)
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// See github.com/go-chi/chi/_examples/ for more in-depth examples.
|
||||||
|
//
|
||||||
|
// URL patterns allow for easy matching of path components in HTTP
|
||||||
|
// requests. The matching components can then be accessed using
|
||||||
|
// chi.URLParam(). All patterns must begin with a slash.
|
||||||
|
//
|
||||||
|
// A simple named placeholder {name} matches any sequence of characters
|
||||||
|
// up to the next / or the end of the URL. Trailing slashes on paths must
|
||||||
|
// be handled explicitly.
|
||||||
|
//
|
||||||
|
// A placeholder with a name followed by a colon allows a regular
|
||||||
|
// expression match, for example {number:\\d+}. The regular expression
|
||||||
|
// syntax is Go's normal regexp RE2 syntax, except that / will never be
|
||||||
|
// matched. An anonymous regexp pattern is allowed, using an empty string
|
||||||
|
// before the colon in the placeholder, such as {:\\d+}
|
||||||
|
//
|
||||||
|
// The special placeholder of asterisk matches the rest of the requested
|
||||||
|
// URL. Any trailing characters in the pattern are ignored. This is the only
|
||||||
|
// placeholder which will match / characters.
|
||||||
|
//
|
||||||
|
// Examples:
|
||||||
|
//
|
||||||
|
// "/user/{name}" matches "/user/jsmith" but not "/user/jsmith/info" or "/user/jsmith/"
|
||||||
|
// "/user/{name}/info" matches "/user/jsmith/info"
|
||||||
|
// "/page/*" matches "/page/intro/latest"
|
||||||
|
// "/page/{other}/latest" also matches "/page/intro/latest"
|
||||||
|
// "/date/{yyyy:\\d\\d\\d\\d}/{mm:\\d\\d}/{dd:\\d\\d}" matches "/date/2017/04/01"
|
||||||
|
package chi
|
||||||
|
|
||||||
|
import "net/http"
|
||||||
|
|
||||||
|
// NewRouter returns a new Mux object that implements the Router interface.
|
||||||
|
func NewRouter() *Mux {
|
||||||
|
return NewMux()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Router consisting of the core routing methods used by chi's Mux,
|
||||||
|
// using only the standard net/http.
|
||||||
|
type Router interface {
|
||||||
|
http.Handler
|
||||||
|
Routes
|
||||||
|
|
||||||
|
// Use appends one or more middlewares onto the Router stack.
|
||||||
|
Use(middlewares ...func(http.Handler) http.Handler)
|
||||||
|
|
||||||
|
// With adds inline middlewares for an endpoint handler.
|
||||||
|
With(middlewares ...func(http.Handler) http.Handler) Router
|
||||||
|
|
||||||
|
// Group adds a new inline-Router along the current routing
|
||||||
|
// path, with a fresh middleware stack for the inline-Router.
|
||||||
|
Group(fn func(r Router)) Router
|
||||||
|
|
||||||
|
// Route mounts a sub-Router along a `pattern`` string.
|
||||||
|
Route(pattern string, fn func(r Router)) Router
|
||||||
|
|
||||||
|
// Mount attaches another http.Handler along ./pattern/*
|
||||||
|
Mount(pattern string, h http.Handler)
|
||||||
|
|
||||||
|
// Handle and HandleFunc adds routes for `pattern` that matches
|
||||||
|
// all HTTP methods.
|
||||||
|
Handle(pattern string, h http.Handler)
|
||||||
|
HandleFunc(pattern string, h http.HandlerFunc)
|
||||||
|
|
||||||
|
// Method and MethodFunc adds routes for `pattern` that matches
|
||||||
|
// the `method` HTTP method.
|
||||||
|
Method(method, pattern string, h http.Handler)
|
||||||
|
MethodFunc(method, pattern string, h http.HandlerFunc)
|
||||||
|
|
||||||
|
// HTTP-method routing along `pattern`
|
||||||
|
Connect(pattern string, h http.HandlerFunc)
|
||||||
|
Delete(pattern string, h http.HandlerFunc)
|
||||||
|
Get(pattern string, h http.HandlerFunc)
|
||||||
|
Head(pattern string, h http.HandlerFunc)
|
||||||
|
Options(pattern string, h http.HandlerFunc)
|
||||||
|
Patch(pattern string, h http.HandlerFunc)
|
||||||
|
Post(pattern string, h http.HandlerFunc)
|
||||||
|
Put(pattern string, h http.HandlerFunc)
|
||||||
|
Trace(pattern string, h http.HandlerFunc)
|
||||||
|
|
||||||
|
// NotFound defines a handler to respond whenever a route could
|
||||||
|
// not be found.
|
||||||
|
NotFound(h http.HandlerFunc)
|
||||||
|
|
||||||
|
// MethodNotAllowed defines a handler to respond whenever a method is
|
||||||
|
// not allowed.
|
||||||
|
MethodNotAllowed(h http.HandlerFunc)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Routes interface adds two methods for router traversal, which is also
|
||||||
|
// used by the `docgen` subpackage to generation documentation for Routers.
|
||||||
|
type Routes interface {
|
||||||
|
// Routes returns the routing tree in an easily traversable structure.
|
||||||
|
Routes() []Route
|
||||||
|
|
||||||
|
// Middlewares returns the list of middlewares in use by the router.
|
||||||
|
Middlewares() Middlewares
|
||||||
|
|
||||||
|
// Match searches the routing tree for a handler that matches
|
||||||
|
// the method/path - similar to routing a http request, but without
|
||||||
|
// executing the handler thereafter.
|
||||||
|
Match(rctx *Context, method, path string) bool
|
||||||
|
|
||||||
|
// Find searches the routing tree for the pattern that matches
|
||||||
|
// the method/path.
|
||||||
|
Find(rctx *Context, method, path string) string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Middlewares type is a slice of standard middleware handlers with methods
|
||||||
|
// to compose middleware chains and http.Handler's.
|
||||||
|
type Middlewares []func(http.Handler) http.Handler
|
||||||
166
vendor/github.com/go-chi/chi/v5/context.go
generated
vendored
Normal file
166
vendor/github.com/go-chi/chi/v5/context.go
generated
vendored
Normal file
@@ -0,0 +1,166 @@
|
|||||||
|
package chi
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// URLParam returns the url parameter from a http.Request object.
|
||||||
|
func URLParam(r *http.Request, key string) string {
|
||||||
|
if rctx := RouteContext(r.Context()); rctx != nil {
|
||||||
|
return rctx.URLParam(key)
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// URLParamFromCtx returns the url parameter from a http.Request Context.
|
||||||
|
func URLParamFromCtx(ctx context.Context, key string) string {
|
||||||
|
if rctx := RouteContext(ctx); rctx != nil {
|
||||||
|
return rctx.URLParam(key)
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// RouteContext returns chi's routing Context object from a
|
||||||
|
// http.Request Context.
|
||||||
|
func RouteContext(ctx context.Context) *Context {
|
||||||
|
val, _ := ctx.Value(RouteCtxKey).(*Context)
|
||||||
|
return val
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRouteContext returns a new routing Context object.
|
||||||
|
func NewRouteContext() *Context {
|
||||||
|
return &Context{}
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
	// RouteCtxKey is the context.Context key to store the request context.
	// A pointer to an unexported struct type is used so this key can never
	// collide with context keys defined in other packages.
	RouteCtxKey = &contextKey{"RouteContext"}
)
|
||||||
|
|
||||||
|
// Context is the default routing context set on the root node of a
// request context to track route patterns, URL parameters and
// an optional routing path.
type Context struct {
	// Routes holds the router that produced this context, so callers can
	// introspect the routing tree from middleware/handlers.
	Routes Routes

	// parentCtx is the parent of this one, for using Context as a
	// context.Context directly. This is an optimization that saves
	// 1 allocation.
	parentCtx context.Context

	// Routing path/method override used during the route search.
	// See Mux#routeHTTP method.
	RoutePath   string
	RouteMethod string

	// URLParams are the stack of routeParams captured during the
	// routing lifecycle across a stack of sub-routers.
	URLParams RouteParams

	// Route parameters matched for the current sub-router. It is
	// intentionally unexported so it can't be tampered.
	routeParams RouteParams

	// The endpoint routing pattern that matched the request URI path
	// or `RoutePath` of the current sub-router. This value will update
	// during the lifecycle of a request passing through a stack of
	// sub-routers.
	routePattern string

	// Routing pattern stack throughout the lifecycle of the request,
	// across all connected routers. It is a record of all matching
	// patterns across a stack of sub-routers.
	RoutePatterns []string

	// methodsAllowed records which methods would have matched the path,
	// used to build the Allow header in case of a 405.
	methodsAllowed []methodTyp // allowed methods in case of a 405
	methodNotAllowed bool
}
|
||||||
|
|
||||||
|
// Reset a routing context to its initial state so it can be reused for
// another request (contexts are pooled via sync.Pool in Mux.ServeHTTP).
// Slices are truncated to length zero rather than reallocated, keeping
// their backing capacity.
func (x *Context) Reset() {
	x.Routes = nil
	x.RoutePath = ""
	x.RouteMethod = ""
	x.RoutePatterns = x.RoutePatterns[:0]
	x.URLParams.Keys = x.URLParams.Keys[:0]
	x.URLParams.Values = x.URLParams.Values[:0]

	x.routePattern = ""
	x.routeParams.Keys = x.routeParams.Keys[:0]
	x.routeParams.Values = x.routeParams.Values[:0]
	x.methodNotAllowed = false
	x.methodsAllowed = x.methodsAllowed[:0]
	x.parentCtx = nil
}
|
||||||
|
|
||||||
|
// URLParam returns the corresponding URL parameter value from the request
// routing context, or "" if the key was never captured.
func (x *Context) URLParam(key string) string {
	// Search newest-first: a parameter captured by an inner sub-router
	// shadows an earlier capture of the same name by an outer router.
	for k := len(x.URLParams.Keys) - 1; k >= 0; k-- {
		if x.URLParams.Keys[k] == key {
			return x.URLParams.Values[k]
		}
	}
	return ""
}
|
||||||
|
|
||||||
|
// RoutePattern builds the routing pattern string for the particular
// request, at the particular point during routing. This means, the value
// will change throughout the execution of a request in a router. That is
// why it's advised to only use this value after calling the next handler.
//
// For example,
//
//	func Instrument(next http.Handler) http.Handler {
//		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//			next.ServeHTTP(w, r)
//			routePattern := chi.RouteContext(r.Context()).RoutePattern()
//			measure(w, r, routePattern)
//		})
//	}
func (x *Context) RoutePattern() string {
	// Nil receiver is allowed so callers can chain RouteContext(...).RoutePattern()
	// without a nil check.
	if x == nil {
		return ""
	}
	// Concatenate the pattern stack, then collapse mount wildcards so
	// "/admin/*" + "/users" reads "/admin/users".
	routePattern := strings.Join(x.RoutePatterns, "")
	routePattern = replaceWildcards(routePattern)
	if routePattern != "/" {
		routePattern = strings.TrimSuffix(routePattern, "//")
		routePattern = strings.TrimSuffix(routePattern, "/")
	}
	return routePattern
}
|
||||||
|
|
||||||
|
// replaceWildcards takes a route pattern and replaces all occurrences of
// "/*/" with "/". It runs to a fixed point so that consecutive wildcards
// (e.g. "/*/*/") are fully collapsed as well.
func replaceWildcards(p string) string {
	for {
		next := strings.ReplaceAll(p, "/*/", "/")
		if next == p {
			return p
		}
		p = next
	}
}
|
||||||
|
|
||||||
|
// RouteParams is a structure to track URL routing parameters efficiently.
// Keys[i] pairs with Values[i]; the two slices are kept the same length.
type RouteParams struct {
	Keys, Values []string
}

// Add will append a URL parameter to the end of the route param
func (s *RouteParams) Add(key, value string) {
	s.Keys = append(s.Keys, key)
	s.Values = append(s.Values, value)
}
|
||||||
|
|
||||||
|
// contextKey is a value for use with context.WithValue. It's used as
// a pointer so it fits in an interface{} without allocation. This technique
// for defining context keys was copied from Go 1.7's new use of context in net/http.
type contextKey struct {
	name string
}

// String identifies the key in debug output (e.g. fmt %v of a context).
func (k *contextKey) String() string {
	return "chi context value " + k.name
}
|
||||||
528
vendor/github.com/go-chi/chi/v5/mux.go
generated
vendored
Normal file
528
vendor/github.com/go-chi/chi/v5/mux.go
generated
vendored
Normal file
@@ -0,0 +1,528 @@
|
|||||||
|
package chi
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Compile-time assertion that *Mux implements the Router interface.
var _ Router = &Mux{}

// Mux is a simple HTTP route multiplexer that parses a request path,
// records any URL params, and executes an end handler. It implements
// the http.Handler interface and is friendly with the standard library.
//
// Mux is designed to be fast, minimal and offer a powerful API for building
// modular and composable HTTP services with a large set of handlers. It's
// particularly useful for writing large REST API services that break a handler
// into many smaller parts composed of middlewares and end handlers.
type Mux struct {
	// The computed mux handler made of the chained middleware stack and
	// the tree router
	handler http.Handler

	// The radix trie router
	tree *node

	// Custom method not allowed handler
	methodNotAllowedHandler http.HandlerFunc

	// A reference to the parent mux used by subrouters when mounting
	// to a parent mux
	parent *Mux

	// Routing context pool, reused across requests to avoid per-request
	// allocation of *Context.
	pool *sync.Pool

	// Custom route not found handler
	notFoundHandler http.HandlerFunc

	// The middleware stack
	middlewares []func(http.Handler) http.Handler

	// Controls the behaviour of middleware chain generation when a mux
	// is registered as an inline group inside another mux.
	inline bool
}
|
||||||
|
|
||||||
|
// NewMux returns a newly initialized Mux object that implements the Router
|
||||||
|
// interface.
|
||||||
|
func NewMux() *Mux {
|
||||||
|
mux := &Mux{tree: &node{}, pool: &sync.Pool{}}
|
||||||
|
mux.pool.New = func() interface{} {
|
||||||
|
return NewRouteContext()
|
||||||
|
}
|
||||||
|
return mux
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServeHTTP is the single method of the http.Handler interface that makes
// Mux interoperable with the standard library. It uses a sync.Pool to get and
// reuse routing contexts for each request.
func (mx *Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Ensure the mux has some routes defined on the mux
	if mx.handler == nil {
		mx.NotFoundHandler().ServeHTTP(w, r)
		return
	}

	// Check if a routing context already exists from a parent router.
	// If so, reuse it rather than allocating/pooling a second one.
	rctx, _ := r.Context().Value(RouteCtxKey).(*Context)
	if rctx != nil {
		mx.handler.ServeHTTP(w, r)
		return
	}

	// Fetch a RouteContext object from the sync pool, and call the computed
	// mx.handler that is comprised of mx.middlewares + mx.routeHTTP.
	// Once the request is finished, reset the routing context and put it back
	// into the pool for reuse from another request.
	rctx = mx.pool.Get().(*Context)
	rctx.Reset()
	rctx.Routes = mx
	rctx.parentCtx = r.Context()

	// NOTE: r.WithContext() causes 2 allocations and context.WithValue() causes 1 allocation
	r = r.WithContext(context.WithValue(r.Context(), RouteCtxKey, rctx))

	// Serve the request and once its done, put the request context back in the sync pool
	mx.handler.ServeHTTP(w, r)
	mx.pool.Put(rctx)
}
|
||||||
|
|
||||||
|
// Use appends a middleware handler to the Mux middleware stack.
//
// The middleware stack for any Mux will execute before searching for a matching
// route to a specific handler, which provides opportunity to respond early,
// change the course of the request execution, or set request-scoped values for
// the next http.Handler.
func (mx *Mux) Use(middlewares ...func(http.Handler) http.Handler) {
	// The handler chain is computed lazily on the first route registration;
	// once built, adding middleware would silently have no effect, so panic.
	if mx.handler != nil {
		panic("chi: all middlewares must be defined before routes on a mux")
	}
	mx.middlewares = append(mx.middlewares, middlewares...)
}
|
||||||
|
|
||||||
|
// Handle adds the route `pattern` that matches any http method to
// execute the `handler` http.Handler.
//
// The pattern may also carry a leading method, separated by whitespace
// ("GET /users"), in which case it is dispatched via Method — mirroring
// the Go 1.22 net/http ServeMux pattern syntax.
func (mx *Mux) Handle(pattern string, handler http.Handler) {
	if i := strings.IndexAny(pattern, " \t"); i >= 0 {
		method, rest := pattern[:i], strings.TrimLeft(pattern[i+1:], " \t")
		mx.Method(method, rest, handler)
		return
	}

	mx.handle(mALL, pattern, handler)
}

// HandleFunc adds the route `pattern` that matches any http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) HandleFunc(pattern string, handlerFn http.HandlerFunc) {
	mx.Handle(pattern, handlerFn)
}
|
||||||
|
|
||||||
|
// Method adds the route `pattern` that matches `method` http method to
// execute the `handler` http.Handler. The method name is case-insensitive;
// unknown methods (not in methodMap, see RegisterMethod) cause a panic.
func (mx *Mux) Method(method, pattern string, handler http.Handler) {
	m, ok := methodMap[strings.ToUpper(method)]
	if !ok {
		panic(fmt.Sprintf("chi: '%s' http method is not supported.", method))
	}
	mx.handle(m, pattern, handler)
}

// MethodFunc adds the route `pattern` that matches `method` http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) MethodFunc(method, pattern string, handlerFn http.HandlerFunc) {
	mx.Method(method, pattern, handlerFn)
}
|
||||||
|
|
||||||
|
// The helpers below are thin per-method wrappers over mx.handle, one for
// each standard HTTP method.

// Connect adds the route `pattern` that matches a CONNECT http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Connect(pattern string, handlerFn http.HandlerFunc) {
	mx.handle(mCONNECT, pattern, handlerFn)
}

// Delete adds the route `pattern` that matches a DELETE http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Delete(pattern string, handlerFn http.HandlerFunc) {
	mx.handle(mDELETE, pattern, handlerFn)
}

// Get adds the route `pattern` that matches a GET http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Get(pattern string, handlerFn http.HandlerFunc) {
	mx.handle(mGET, pattern, handlerFn)
}

// Head adds the route `pattern` that matches a HEAD http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Head(pattern string, handlerFn http.HandlerFunc) {
	mx.handle(mHEAD, pattern, handlerFn)
}

// Options adds the route `pattern` that matches an OPTIONS http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Options(pattern string, handlerFn http.HandlerFunc) {
	mx.handle(mOPTIONS, pattern, handlerFn)
}

// Patch adds the route `pattern` that matches a PATCH http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Patch(pattern string, handlerFn http.HandlerFunc) {
	mx.handle(mPATCH, pattern, handlerFn)
}

// Post adds the route `pattern` that matches a POST http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Post(pattern string, handlerFn http.HandlerFunc) {
	mx.handle(mPOST, pattern, handlerFn)
}

// Put adds the route `pattern` that matches a PUT http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Put(pattern string, handlerFn http.HandlerFunc) {
	mx.handle(mPUT, pattern, handlerFn)
}

// Trace adds the route `pattern` that matches a TRACE http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Trace(pattern string, handlerFn http.HandlerFunc) {
	mx.handle(mTRACE, pattern, handlerFn)
}
|
||||||
|
|
||||||
|
// NotFound sets a custom http.HandlerFunc for routing paths that could
// not be found. The default 404 handler is `http.NotFound`.
func (mx *Mux) NotFound(handlerFn http.HandlerFunc) {
	// Build NotFound handler chain
	m := mx
	hFn := handlerFn
	if mx.inline && mx.parent != nil {
		// For inline groups, install on the parent mux but wrap the
		// handler with this group's middleware stack first.
		m = mx.parent
		hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP
	}

	// Update the notFoundHandler from this point forward, propagating to
	// already-mounted sub-routers that haven't set their own.
	m.notFoundHandler = hFn
	m.updateSubRoutes(func(subMux *Mux) {
		if subMux.notFoundHandler == nil {
			subMux.NotFound(hFn)
		}
	})
}
|
||||||
|
|
||||||
|
// MethodNotAllowed sets a custom http.HandlerFunc for routing paths where the
// method is unresolved. The default handler returns a 405 with an empty body.
func (mx *Mux) MethodNotAllowed(handlerFn http.HandlerFunc) {
	// Build MethodNotAllowed handler chain
	m := mx
	hFn := handlerFn
	if mx.inline && mx.parent != nil {
		// Inline groups install on the parent, wrapped with the group's
		// middleware stack — mirrors NotFound above.
		m = mx.parent
		hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP
	}

	// Update the methodNotAllowedHandler from this point forward,
	// propagating to sub-routers that haven't set their own.
	m.methodNotAllowedHandler = hFn
	m.updateSubRoutes(func(subMux *Mux) {
		if subMux.methodNotAllowedHandler == nil {
			subMux.MethodNotAllowed(hFn)
		}
	})
}
|
||||||
|
|
||||||
|
// With adds inline middlewares for an endpoint handler, returning a new
// inline Mux that shares this mux's routing tree and context pool.
func (mx *Mux) With(middlewares ...func(http.Handler) http.Handler) Router {
	// Similarly as in handle(), we must build the mux handler once additional
	// middleware registration isn't allowed for this stack, like now.
	if !mx.inline && mx.handler == nil {
		mx.updateRouteHandler()
	}

	// Copy middlewares from parent inline muxs so the new group's chain is
	// independent of later mutations.
	var mws Middlewares
	if mx.inline {
		mws = make(Middlewares, len(mx.middlewares))
		copy(mws, mx.middlewares)
	}
	mws = append(mws, middlewares...)

	im := &Mux{
		pool: mx.pool, inline: true, parent: mx, tree: mx.tree, middlewares: mws,
		notFoundHandler: mx.notFoundHandler, methodNotAllowedHandler: mx.methodNotAllowedHandler,
	}

	return im
}
|
||||||
|
|
||||||
|
// Group creates a new inline-Mux with a copy of middleware stack. It's useful
// for a group of handlers along the same routing path that use an additional
// set of middlewares. See _examples/.
func (mx *Mux) Group(fn func(r Router)) Router {
	im := mx.With()
	if fn != nil {
		fn(im)
	}
	return im
}

// Route creates a new Mux and mounts it along the `pattern` as a subrouter.
// Effectively, this is a short-hand call to Mount. See _examples/.
func (mx *Mux) Route(pattern string, fn func(r Router)) Router {
	if fn == nil {
		panic(fmt.Sprintf("chi: attempting to Route() a nil subrouter on '%s'", pattern))
	}
	subRouter := NewRouter()
	fn(subRouter)
	mx.Mount(pattern, subRouter)
	return subRouter
}
|
||||||
|
|
||||||
|
// Mount attaches another http.Handler or chi Router as a subrouter along a routing
// path. It's very useful to split up a large API as many independent routers and
// compose them as a single service using Mount. See _examples/.
//
// Note that Mount() simply sets a wildcard along the `pattern` that will continue
// routing at the `handler`, which in most cases is another chi.Router. As a result,
// if you define two Mount() routes on the exact same pattern the mount will panic.
func (mx *Mux) Mount(pattern string, handler http.Handler) {
	if handler == nil {
		panic(fmt.Sprintf("chi: attempting to Mount() a nil handler on '%s'", pattern))
	}

	// Provide runtime safety for ensuring a pattern isn't mounted on an existing
	// routing pattern.
	if mx.tree.findPattern(pattern+"*") || mx.tree.findPattern(pattern+"/*") {
		panic(fmt.Sprintf("chi: attempting to Mount() a handler on an existing path, '%s'", pattern))
	}

	// Assign sub-Router's with the parent not found & method not allowed handler if not specified.
	subr, ok := handler.(*Mux)
	if ok && subr.notFoundHandler == nil && mx.notFoundHandler != nil {
		subr.NotFound(mx.notFoundHandler)
	}
	if ok && subr.methodNotAllowedHandler == nil && mx.methodNotAllowedHandler != nil {
		subr.MethodNotAllowed(mx.methodNotAllowedHandler)
	}

	mountHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		rctx := RouteContext(r.Context())

		// shift the url path past the previous subrouter
		rctx.RoutePath = mx.nextRoutePath(rctx)

		// reset the wildcard URLParam which connects the subrouter
		n := len(rctx.URLParams.Keys) - 1
		if n >= 0 && rctx.URLParams.Keys[n] == "*" && len(rctx.URLParams.Values) > n {
			rctx.URLParams.Values[n] = ""
		}

		handler.ServeHTTP(w, r)
	})

	// Register both "pattern" and "pattern/" so the mount matches with or
	// without a trailing slash, then fall through to the wildcard route.
	if pattern == "" || pattern[len(pattern)-1] != '/' {
		mx.handle(mALL|mSTUB, pattern, mountHandler)
		mx.handle(mALL|mSTUB, pattern+"/", mountHandler)
		pattern += "/"
	}

	method := mALL
	subroutes, _ := handler.(Routes)
	if subroutes != nil {
		method |= mSTUB
	}
	n := mx.handle(method, pattern+"*", mountHandler)

	if subroutes != nil {
		n.subroutes = subroutes
	}
}
|
||||||
|
|
||||||
|
// Routes returns a slice of routing information from the tree,
// useful for traversing available routes of a router.
func (mx *Mux) Routes() []Route {
	return mx.tree.routes()
}

// Middlewares returns a slice of middleware handler functions.
func (mx *Mux) Middlewares() Middlewares {
	return mx.middlewares
}
|
||||||
|
|
||||||
|
// Match searches the routing tree for a handler that matches the method/path.
// It's similar to routing a http request, but without executing the handler
// thereafter.
//
// Note: the *Context state is updated during execution, so manage
// the state carefully or make a NewRouteContext().
func (mx *Mux) Match(rctx *Context, method, path string) bool {
	return mx.Find(rctx, method, path) != ""
}

// Find searches the routing tree for the pattern that matches
// the method/path, returning "" for an unknown method or no match.
//
// Note: the *Context state is updated during execution, so manage
// the state carefully or make a NewRouteContext().
func (mx *Mux) Find(rctx *Context, method, path string) string {
	m, ok := methodMap[method]
	if !ok {
		return ""
	}

	node, _, _ := mx.tree.FindRoute(rctx, m, path)
	pattern := rctx.routePattern

	if node != nil {
		if node.subroutes == nil {
			// Leaf endpoint: return its registered pattern directly.
			// NOTE(review): assumes endpoints[m] is populated whenever
			// FindRoute matched this method — confirm against tree.go.
			e := node.endpoints[m]
			return e.pattern
		}

		// Mounted sub-router: recurse with the remaining path and splice
		// the sub-pattern onto this level's pattern (minus its "/*").
		rctx.RoutePath = mx.nextRoutePath(rctx)
		subPattern := node.subroutes.Find(rctx, method, rctx.RoutePath)
		if subPattern == "" {
			return ""
		}

		pattern = strings.TrimSuffix(pattern, "/*")
		pattern += subPattern
	}

	return pattern
}
|
||||||
|
|
||||||
|
// NotFoundHandler returns the default Mux 404 responder whenever a route
// cannot be found.
func (mx *Mux) NotFoundHandler() http.HandlerFunc {
	if mx.notFoundHandler != nil {
		return mx.notFoundHandler
	}
	return http.NotFound
}

// MethodNotAllowedHandler returns the default Mux 405 responder whenever
// a method cannot be resolved for a route.
func (mx *Mux) MethodNotAllowedHandler(methodsAllowed ...methodTyp) http.HandlerFunc {
	if mx.methodNotAllowedHandler != nil {
		return mx.methodNotAllowedHandler
	}
	return methodNotAllowedHandler(methodsAllowed...)
}
|
||||||
|
|
||||||
|
// handle registers a http.Handler in the routing tree for a particular http method
// and routing pattern, returning the tree node the route was inserted at.
func (mx *Mux) handle(method methodTyp, pattern string, handler http.Handler) *node {
	if len(pattern) == 0 || pattern[0] != '/' {
		panic(fmt.Sprintf("chi: routing pattern must begin with '/' in '%s'", pattern))
	}

	// Build the computed routing handler for this routing pattern.
	// After this point Use() will panic — see Mux.Use.
	if !mx.inline && mx.handler == nil {
		mx.updateRouteHandler()
	}

	// Build endpoint handler with inline middlewares for the route
	var h http.Handler
	if mx.inline {
		mx.handler = http.HandlerFunc(mx.routeHTTP)
		h = Chain(mx.middlewares...).Handler(handler)
	} else {
		h = handler
	}

	// Add the endpoint to the tree and return the node
	return mx.tree.InsertRoute(method, pattern, h)
}
|
||||||
|
|
||||||
|
// routeHTTP routes a http.Request through the Mux routing tree to serve
// the matching handler for a particular http method.
func (mx *Mux) routeHTTP(w http.ResponseWriter, r *http.Request) {
	// Grab the route context object; ServeHTTP guarantees it exists.
	rctx := r.Context().Value(RouteCtxKey).(*Context)

	// The request routing path: an explicit override from a parent mount
	// wins; otherwise prefer RawPath (keeps %-escapes) over Path.
	routePath := rctx.RoutePath
	if routePath == "" {
		if r.URL.RawPath != "" {
			routePath = r.URL.RawPath
		} else {
			routePath = r.URL.Path
		}
		if routePath == "" {
			routePath = "/"
		}
	}

	// Check if method is supported by chi
	if rctx.RouteMethod == "" {
		rctx.RouteMethod = r.Method
	}
	method, ok := methodMap[rctx.RouteMethod]
	if !ok {
		mx.MethodNotAllowedHandler().ServeHTTP(w, r)
		return
	}

	// Find the route
	if _, _, h := mx.tree.FindRoute(rctx, method, routePath); h != nil {
		// Set http.Request path values from our request context so
		// r.PathValue(key) also works (Go 1.22+ stdlib API).
		for i, key := range rctx.URLParams.Keys {
			value := rctx.URLParams.Values[i]
			r.SetPathValue(key, value)
		}
		if supportsPattern {
			// Populate r.Pattern on Go 1.23+ (see pattern.go build tags).
			setPattern(rctx, r)
		}

		h.ServeHTTP(w, r)
		return
	}
	if rctx.methodNotAllowed {
		mx.MethodNotAllowedHandler(rctx.methodsAllowed...).ServeHTTP(w, r)
	} else {
		mx.NotFoundHandler().ServeHTTP(w, r)
	}
}
|
||||||
|
|
||||||
|
// nextRoutePath returns the remaining path for a mounted sub-router: the
// portion captured by the trailing "*" wildcard, re-rooted with a leading
// slash, or "/" when no wildcard capture exists.
func (mx *Mux) nextRoutePath(rctx *Context) string {
	routePath := "/"
	nx := len(rctx.routeParams.Keys) - 1 // index of last param in list
	if nx >= 0 && rctx.routeParams.Keys[nx] == "*" && len(rctx.routeParams.Values) > nx {
		routePath = "/" + rctx.routeParams.Values[nx]
	}
	return routePath
}
|
||||||
|
|
||||||
|
// updateSubRoutes recursively applies fn to every directly mounted *Mux
// sub-router (non-*Mux subroutes are skipped).
func (mx *Mux) updateSubRoutes(fn func(subMux *Mux)) {
	for _, r := range mx.tree.routes() {
		subMux, ok := r.SubRoutes.(*Mux)
		if !ok {
			continue
		}
		fn(subMux)
	}
}

// updateRouteHandler builds the single mux handler that is a chain of the middleware
// stack, as defined by calls to Use(), and the tree router (Mux) itself. After this
// point, no other middlewares can be registered on this Mux's stack. But you can still
// compose additional middlewares via Group()'s or using a chained middleware handler.
func (mx *Mux) updateRouteHandler() {
	mx.handler = chain(mx.middlewares, http.HandlerFunc(mx.routeHTTP))
}
|
||||||
|
|
||||||
|
// methodNotAllowedHandler is a helper function to respond with a 405,
|
||||||
|
// method not allowed. It sets the Allow header with the list of allowed
|
||||||
|
// methods for the route.
|
||||||
|
func methodNotAllowedHandler(methodsAllowed ...methodTyp) func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
for _, m := range methodsAllowed {
|
||||||
|
w.Header().Add("Allow", reverseMethodMap[m])
|
||||||
|
}
|
||||||
|
w.WriteHeader(405)
|
||||||
|
w.Write(nil)
|
||||||
|
}
|
||||||
|
}
|
||||||
16
vendor/github.com/go-chi/chi/v5/pattern.go
generated
vendored
Normal file
16
vendor/github.com/go-chi/chi/v5/pattern.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
//go:build go1.23 && !tinygo
// +build go1.23,!tinygo

package chi

import "net/http"

// supportsPattern is true if the Go version is 1.23 and above.
//
// If this is true, `net/http.Request` has field `Pattern`.
const supportsPattern = true

// setPattern sets the mux matched pattern in the http Request, so
// handlers and stdlib tooling can read r.Pattern.
func setPattern(rctx *Context, r *http.Request) {
	r.Pattern = rctx.routePattern
}
|
||||||
17
vendor/github.com/go-chi/chi/v5/pattern_fallback.go
generated
vendored
Normal file
17
vendor/github.com/go-chi/chi/v5/pattern_fallback.go
generated
vendored
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
//go:build !go1.23 || tinygo
// +build !go1.23 tinygo

package chi

import "net/http"

// supportsPattern is true if the Go version is 1.23 and above.
//
// If this is true, `net/http.Request` has field `Pattern`.
const supportsPattern = false

// setPattern sets the mux matched pattern in the http Request.
//
// setPattern is only supported in Go 1.23 and above so
// this is just a blank function so that it compiles.
func setPattern(rctx *Context, r *http.Request) {}
|
||||||
872
vendor/github.com/go-chi/chi/v5/tree.go
generated
vendored
Normal file
872
vendor/github.com/go-chi/chi/v5/tree.go
generated
vendored
Normal file
@@ -0,0 +1,872 @@
|
|||||||
|
package chi
|
||||||
|
|
||||||
|
// Radix tree implementation below is a based on the original work by
|
||||||
|
// Armon Dadgar in https://github.com/armon/go-radix/blob/master/radix.go
|
||||||
|
// (MIT licensed). It's been heavily modified for use as a HTTP routing tree.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"regexp"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// methodTyp is a bitmask of HTTP methods, allowing a single endpoint map
// entry to match several methods at once (see mALL).
type methodTyp uint

const (
	// mSTUB marks internal mount/stub routes rather than a real method.
	mSTUB methodTyp = 1 << iota
	mCONNECT
	mDELETE
	mGET
	mHEAD
	mOPTIONS
	mPATCH
	mPOST
	mPUT
	mTRACE
)
|
||||||
|
|
||||||
|
// mALL is the union of every standard method bit; RegisterMethod extends
// it with custom methods at runtime.
var mALL = mCONNECT | mDELETE | mGET | mHEAD |
	mOPTIONS | mPATCH | mPOST | mPUT | mTRACE

// methodMap maps canonical (upper-case) method names to their bit.
var methodMap = map[string]methodTyp{
	http.MethodConnect: mCONNECT,
	http.MethodDelete:  mDELETE,
	http.MethodGet:     mGET,
	http.MethodHead:    mHEAD,
	http.MethodOptions: mOPTIONS,
	http.MethodPatch:   mPATCH,
	http.MethodPost:    mPOST,
	http.MethodPut:     mPUT,
	http.MethodTrace:   mTRACE,
}

// reverseMethodMap is the inverse of methodMap, used to render the Allow
// header for 405 responses.
var reverseMethodMap = map[methodTyp]string{
	mCONNECT: http.MethodConnect,
	mDELETE:  http.MethodDelete,
	mGET:     http.MethodGet,
	mHEAD:    http.MethodHead,
	mOPTIONS: http.MethodOptions,
	mPATCH:   http.MethodPatch,
	mPOST:    http.MethodPost,
	mPUT:     http.MethodPut,
	mTRACE:   http.MethodTrace,
}
|
||||||
|
|
||||||
|
// RegisterMethod adds support for custom HTTP method handlers, available
// via Router#Method and Router#MethodFunc. The name is canonicalized to
// upper case; registering an existing or empty method is a no-op.
//
// NOTE(review): the package-level maps are mutated without locking, so this
// appears intended for init-time use only — confirm before calling it
// concurrently with request serving.
func RegisterMethod(method string) {
	if method == "" {
		return
	}
	method = strings.ToUpper(method)
	if _, ok := methodMap[method]; ok {
		return
	}
	// Each method consumes one bit of a uint; guard against running out.
	n := len(methodMap)
	if n > strconv.IntSize-2 {
		panic(fmt.Sprintf("chi: max number of methods reached (%d)", strconv.IntSize))
	}
	mt := methodTyp(2 << n)
	methodMap[method] = mt
	reverseMethodMap[mt] = method
	mALL |= mt
}
|
||||||
|
|
||||||
|
// nodeTyp classifies radix-tree nodes by how their segment matches a path.
type nodeTyp uint8

const (
	ntStatic   nodeTyp = iota // /home
	ntRegexp                  // /{id:[0-9]+}
	ntParam                   // /{user}
	ntCatchAll                // /api/v1/*
)
|
||||||
|
|
||||||
|
type node struct {
|
||||||
|
// subroutes on the leaf node
|
||||||
|
subroutes Routes
|
||||||
|
|
||||||
|
// regexp matcher for regexp nodes
|
||||||
|
rex *regexp.Regexp
|
||||||
|
|
||||||
|
// HTTP handler endpoints on the leaf node
|
||||||
|
endpoints endpoints
|
||||||
|
|
||||||
|
// prefix is the common prefix we ignore
|
||||||
|
prefix string
|
||||||
|
|
||||||
|
// child nodes should be stored in-order for iteration,
|
||||||
|
// in groups of the node type.
|
||||||
|
children [ntCatchAll + 1]nodes
|
||||||
|
|
||||||
|
// first byte of the child prefix
|
||||||
|
tail byte
|
||||||
|
|
||||||
|
// node type: static, regexp, param, catchAll
|
||||||
|
typ nodeTyp
|
||||||
|
|
||||||
|
// first byte of the prefix
|
||||||
|
label byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// endpoints is a mapping of http method constants to handlers
|
||||||
|
// for a given route.
|
||||||
|
type endpoints map[methodTyp]*endpoint
|
||||||
|
|
||||||
|
type endpoint struct {
|
||||||
|
// endpoint handler
|
||||||
|
handler http.Handler
|
||||||
|
|
||||||
|
// pattern is the routing pattern for handler nodes
|
||||||
|
pattern string
|
||||||
|
|
||||||
|
// parameter keys recorded on handler nodes
|
||||||
|
paramKeys []string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s endpoints) Value(method methodTyp) *endpoint {
|
||||||
|
mh, ok := s[method]
|
||||||
|
if !ok {
|
||||||
|
mh = &endpoint{}
|
||||||
|
s[method] = mh
|
||||||
|
}
|
||||||
|
return mh
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *node) InsertRoute(method methodTyp, pattern string, handler http.Handler) *node {
|
||||||
|
var parent *node
|
||||||
|
search := pattern
|
||||||
|
|
||||||
|
for {
|
||||||
|
// Handle key exhaustion
|
||||||
|
if len(search) == 0 {
|
||||||
|
// Insert or update the node's leaf handler
|
||||||
|
n.setEndpoint(method, handler, pattern)
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
// We're going to be searching for a wild node next,
|
||||||
|
// in this case, we need to get the tail
|
||||||
|
var label = search[0]
|
||||||
|
var segTail byte
|
||||||
|
var segEndIdx int
|
||||||
|
var segTyp nodeTyp
|
||||||
|
var segRexpat string
|
||||||
|
if label == '{' || label == '*' {
|
||||||
|
segTyp, _, segRexpat, segTail, _, segEndIdx = patNextSegment(search)
|
||||||
|
}
|
||||||
|
|
||||||
|
var prefix string
|
||||||
|
if segTyp == ntRegexp {
|
||||||
|
prefix = segRexpat
|
||||||
|
}
|
||||||
|
|
||||||
|
// Look for the edge to attach to
|
||||||
|
parent = n
|
||||||
|
n = n.getEdge(segTyp, label, segTail, prefix)
|
||||||
|
|
||||||
|
// No edge, create one
|
||||||
|
if n == nil {
|
||||||
|
child := &node{label: label, tail: segTail, prefix: search}
|
||||||
|
hn := parent.addChild(child, search)
|
||||||
|
hn.setEndpoint(method, handler, pattern)
|
||||||
|
|
||||||
|
return hn
|
||||||
|
}
|
||||||
|
|
||||||
|
// Found an edge to match the pattern
|
||||||
|
|
||||||
|
if n.typ > ntStatic {
|
||||||
|
// We found a param node, trim the param from the search path and continue.
|
||||||
|
// This param/wild pattern segment would already be on the tree from a previous
|
||||||
|
// call to addChild when creating a new node.
|
||||||
|
search = search[segEndIdx:]
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Static nodes fall below here.
|
||||||
|
// Determine longest prefix of the search key on match.
|
||||||
|
commonPrefix := longestPrefix(search, n.prefix)
|
||||||
|
if commonPrefix == len(n.prefix) {
|
||||||
|
// the common prefix is as long as the current node's prefix we're attempting to insert.
|
||||||
|
// keep the search going.
|
||||||
|
search = search[commonPrefix:]
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Split the node
|
||||||
|
child := &node{
|
||||||
|
typ: ntStatic,
|
||||||
|
prefix: search[:commonPrefix],
|
||||||
|
}
|
||||||
|
parent.replaceChild(search[0], segTail, child)
|
||||||
|
|
||||||
|
// Restore the existing node
|
||||||
|
n.label = n.prefix[commonPrefix]
|
||||||
|
n.prefix = n.prefix[commonPrefix:]
|
||||||
|
child.addChild(n, n.prefix)
|
||||||
|
|
||||||
|
// If the new key is a subset, set the method/handler on this node and finish.
|
||||||
|
search = search[commonPrefix:]
|
||||||
|
if len(search) == 0 {
|
||||||
|
child.setEndpoint(method, handler, pattern)
|
||||||
|
return child
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a new edge for the node
|
||||||
|
subchild := &node{
|
||||||
|
typ: ntStatic,
|
||||||
|
label: search[0],
|
||||||
|
prefix: search,
|
||||||
|
}
|
||||||
|
hn := child.addChild(subchild, search)
|
||||||
|
hn.setEndpoint(method, handler, pattern)
|
||||||
|
return hn
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// addChild appends the new `child` node to the tree using the `pattern` as the trie key.
|
||||||
|
// For a URL router like chi's, we split the static, param, regexp and wildcard segments
|
||||||
|
// into different nodes. In addition, addChild will recursively call itself until every
|
||||||
|
// pattern segment is added to the url pattern tree as individual nodes, depending on type.
|
||||||
|
func (n *node) addChild(child *node, prefix string) *node {
|
||||||
|
search := prefix
|
||||||
|
|
||||||
|
// handler leaf node added to the tree is the child.
|
||||||
|
// this may be overridden later down the flow
|
||||||
|
hn := child
|
||||||
|
|
||||||
|
// Parse next segment
|
||||||
|
segTyp, _, segRexpat, segTail, segStartIdx, segEndIdx := patNextSegment(search)
|
||||||
|
|
||||||
|
// Add child depending on next up segment
|
||||||
|
switch segTyp {
|
||||||
|
|
||||||
|
case ntStatic:
|
||||||
|
// Search prefix is all static (that is, has no params in path)
|
||||||
|
// noop
|
||||||
|
|
||||||
|
default:
|
||||||
|
// Search prefix contains a param, regexp or wildcard
|
||||||
|
|
||||||
|
if segTyp == ntRegexp {
|
||||||
|
rex, err := regexp.Compile(segRexpat)
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Sprintf("chi: invalid regexp pattern '%s' in route param", segRexpat))
|
||||||
|
}
|
||||||
|
child.prefix = segRexpat
|
||||||
|
child.rex = rex
|
||||||
|
}
|
||||||
|
|
||||||
|
if segStartIdx == 0 {
|
||||||
|
// Route starts with a param
|
||||||
|
child.typ = segTyp
|
||||||
|
|
||||||
|
if segTyp == ntCatchAll {
|
||||||
|
segStartIdx = -1
|
||||||
|
} else {
|
||||||
|
segStartIdx = segEndIdx
|
||||||
|
}
|
||||||
|
if segStartIdx < 0 {
|
||||||
|
segStartIdx = len(search)
|
||||||
|
}
|
||||||
|
child.tail = segTail // for params, we set the tail
|
||||||
|
|
||||||
|
if segStartIdx != len(search) {
|
||||||
|
// add static edge for the remaining part, split the end.
|
||||||
|
// its not possible to have adjacent param nodes, so its certainly
|
||||||
|
// going to be a static node next.
|
||||||
|
|
||||||
|
search = search[segStartIdx:] // advance search position
|
||||||
|
|
||||||
|
nn := &node{
|
||||||
|
typ: ntStatic,
|
||||||
|
label: search[0],
|
||||||
|
prefix: search,
|
||||||
|
}
|
||||||
|
hn = child.addChild(nn, search)
|
||||||
|
}
|
||||||
|
|
||||||
|
} else if segStartIdx > 0 {
|
||||||
|
// Route has some param
|
||||||
|
|
||||||
|
// starts with a static segment
|
||||||
|
child.typ = ntStatic
|
||||||
|
child.prefix = search[:segStartIdx]
|
||||||
|
child.rex = nil
|
||||||
|
|
||||||
|
// add the param edge node
|
||||||
|
search = search[segStartIdx:]
|
||||||
|
|
||||||
|
nn := &node{
|
||||||
|
typ: segTyp,
|
||||||
|
label: search[0],
|
||||||
|
tail: segTail,
|
||||||
|
}
|
||||||
|
hn = child.addChild(nn, search)
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
n.children[child.typ] = append(n.children[child.typ], child)
|
||||||
|
n.children[child.typ].Sort()
|
||||||
|
return hn
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *node) replaceChild(label, tail byte, child *node) {
|
||||||
|
for i := 0; i < len(n.children[child.typ]); i++ {
|
||||||
|
if n.children[child.typ][i].label == label && n.children[child.typ][i].tail == tail {
|
||||||
|
n.children[child.typ][i] = child
|
||||||
|
n.children[child.typ][i].label = label
|
||||||
|
n.children[child.typ][i].tail = tail
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("chi: replacing missing child")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *node) getEdge(ntyp nodeTyp, label, tail byte, prefix string) *node {
|
||||||
|
nds := n.children[ntyp]
|
||||||
|
for i := range nds {
|
||||||
|
if nds[i].label == label && nds[i].tail == tail {
|
||||||
|
if ntyp == ntRegexp && nds[i].prefix != prefix {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return nds[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *node) setEndpoint(method methodTyp, handler http.Handler, pattern string) {
|
||||||
|
// Set the handler for the method type on the node
|
||||||
|
if n.endpoints == nil {
|
||||||
|
n.endpoints = make(endpoints)
|
||||||
|
}
|
||||||
|
|
||||||
|
paramKeys := patParamKeys(pattern)
|
||||||
|
|
||||||
|
if method&mSTUB == mSTUB {
|
||||||
|
n.endpoints.Value(mSTUB).handler = handler
|
||||||
|
}
|
||||||
|
if method&mALL == mALL {
|
||||||
|
h := n.endpoints.Value(mALL)
|
||||||
|
h.handler = handler
|
||||||
|
h.pattern = pattern
|
||||||
|
h.paramKeys = paramKeys
|
||||||
|
for _, m := range methodMap {
|
||||||
|
h := n.endpoints.Value(m)
|
||||||
|
h.handler = handler
|
||||||
|
h.pattern = pattern
|
||||||
|
h.paramKeys = paramKeys
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
h := n.endpoints.Value(method)
|
||||||
|
h.handler = handler
|
||||||
|
h.pattern = pattern
|
||||||
|
h.paramKeys = paramKeys
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *node) FindRoute(rctx *Context, method methodTyp, path string) (*node, endpoints, http.Handler) {
|
||||||
|
// Reset the context routing pattern and params
|
||||||
|
rctx.routePattern = ""
|
||||||
|
rctx.routeParams.Keys = rctx.routeParams.Keys[:0]
|
||||||
|
rctx.routeParams.Values = rctx.routeParams.Values[:0]
|
||||||
|
|
||||||
|
// Find the routing handlers for the path
|
||||||
|
rn := n.findRoute(rctx, method, path)
|
||||||
|
if rn == nil {
|
||||||
|
return nil, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Record the routing params in the request lifecycle
|
||||||
|
rctx.URLParams.Keys = append(rctx.URLParams.Keys, rctx.routeParams.Keys...)
|
||||||
|
rctx.URLParams.Values = append(rctx.URLParams.Values, rctx.routeParams.Values...)
|
||||||
|
|
||||||
|
// Record the routing pattern in the request lifecycle
|
||||||
|
if rn.endpoints[method].pattern != "" {
|
||||||
|
rctx.routePattern = rn.endpoints[method].pattern
|
||||||
|
rctx.RoutePatterns = append(rctx.RoutePatterns, rctx.routePattern)
|
||||||
|
}
|
||||||
|
|
||||||
|
return rn, rn.endpoints, rn.endpoints[method].handler
|
||||||
|
}
|
||||||
|
|
||||||
|
// Recursive edge traversal by checking all nodeTyp groups along the way.
|
||||||
|
// It's like searching through a multi-dimensional radix trie.
|
||||||
|
func (n *node) findRoute(rctx *Context, method methodTyp, path string) *node {
|
||||||
|
nn := n
|
||||||
|
search := path
|
||||||
|
|
||||||
|
for t, nds := range nn.children {
|
||||||
|
ntyp := nodeTyp(t)
|
||||||
|
if len(nds) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var xn *node
|
||||||
|
xsearch := search
|
||||||
|
|
||||||
|
var label byte
|
||||||
|
if search != "" {
|
||||||
|
label = search[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
switch ntyp {
|
||||||
|
case ntStatic:
|
||||||
|
xn = nds.findEdge(label)
|
||||||
|
if xn == nil || !strings.HasPrefix(xsearch, xn.prefix) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
xsearch = xsearch[len(xn.prefix):]
|
||||||
|
|
||||||
|
case ntParam, ntRegexp:
|
||||||
|
// short-circuit and return no matching route for empty param values
|
||||||
|
if xsearch == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// serially loop through each node grouped by the tail delimiter
|
||||||
|
for _, xn = range nds {
|
||||||
|
// label for param nodes is the delimiter byte
|
||||||
|
p := strings.IndexByte(xsearch, xn.tail)
|
||||||
|
|
||||||
|
if p < 0 {
|
||||||
|
if xn.tail == '/' {
|
||||||
|
p = len(xsearch)
|
||||||
|
} else {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else if ntyp == ntRegexp && p == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if ntyp == ntRegexp && xn.rex != nil {
|
||||||
|
if !xn.rex.MatchString(xsearch[:p]) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else if strings.IndexByte(xsearch[:p], '/') != -1 {
|
||||||
|
// avoid a match across path segments
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
prevlen := len(rctx.routeParams.Values)
|
||||||
|
rctx.routeParams.Values = append(rctx.routeParams.Values, xsearch[:p])
|
||||||
|
xsearch = xsearch[p:]
|
||||||
|
|
||||||
|
if len(xsearch) == 0 {
|
||||||
|
if xn.isLeaf() {
|
||||||
|
h := xn.endpoints[method]
|
||||||
|
if h != nil && h.handler != nil {
|
||||||
|
rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...)
|
||||||
|
return xn
|
||||||
|
}
|
||||||
|
|
||||||
|
for endpoints := range xn.endpoints {
|
||||||
|
if endpoints == mALL || endpoints == mSTUB {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
rctx.methodsAllowed = append(rctx.methodsAllowed, endpoints)
|
||||||
|
}
|
||||||
|
|
||||||
|
// flag that the routing context found a route, but not a corresponding
|
||||||
|
// supported method
|
||||||
|
rctx.methodNotAllowed = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// recursively find the next node on this branch
|
||||||
|
fin := xn.findRoute(rctx, method, xsearch)
|
||||||
|
if fin != nil {
|
||||||
|
return fin
|
||||||
|
}
|
||||||
|
|
||||||
|
// not found on this branch, reset vars
|
||||||
|
rctx.routeParams.Values = rctx.routeParams.Values[:prevlen]
|
||||||
|
xsearch = search
|
||||||
|
}
|
||||||
|
|
||||||
|
rctx.routeParams.Values = append(rctx.routeParams.Values, "")
|
||||||
|
|
||||||
|
default:
|
||||||
|
// catch-all nodes
|
||||||
|
rctx.routeParams.Values = append(rctx.routeParams.Values, search)
|
||||||
|
xn = nds[0]
|
||||||
|
xsearch = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
if xn == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// did we find it yet?
|
||||||
|
if len(xsearch) == 0 {
|
||||||
|
if xn.isLeaf() {
|
||||||
|
h := xn.endpoints[method]
|
||||||
|
if h != nil && h.handler != nil {
|
||||||
|
rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...)
|
||||||
|
return xn
|
||||||
|
}
|
||||||
|
|
||||||
|
for endpoints := range xn.endpoints {
|
||||||
|
if endpoints == mALL || endpoints == mSTUB {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
rctx.methodsAllowed = append(rctx.methodsAllowed, endpoints)
|
||||||
|
}
|
||||||
|
|
||||||
|
// flag that the routing context found a route, but not a corresponding
|
||||||
|
// supported method
|
||||||
|
rctx.methodNotAllowed = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// recursively find the next node..
|
||||||
|
fin := xn.findRoute(rctx, method, xsearch)
|
||||||
|
if fin != nil {
|
||||||
|
return fin
|
||||||
|
}
|
||||||
|
|
||||||
|
// Did not find final handler, let's remove the param here if it was set
|
||||||
|
if xn.typ > ntStatic {
|
||||||
|
if len(rctx.routeParams.Values) > 0 {
|
||||||
|
rctx.routeParams.Values = rctx.routeParams.Values[:len(rctx.routeParams.Values)-1]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *node) findEdge(ntyp nodeTyp, label byte) *node {
|
||||||
|
nds := n.children[ntyp]
|
||||||
|
num := len(nds)
|
||||||
|
idx := 0
|
||||||
|
|
||||||
|
switch ntyp {
|
||||||
|
case ntStatic, ntParam, ntRegexp:
|
||||||
|
i, j := 0, num-1
|
||||||
|
for i <= j {
|
||||||
|
idx = i + (j-i)/2
|
||||||
|
if label > nds[idx].label {
|
||||||
|
i = idx + 1
|
||||||
|
} else if label < nds[idx].label {
|
||||||
|
j = idx - 1
|
||||||
|
} else {
|
||||||
|
i = num // breaks cond
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if nds[idx].label != label {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return nds[idx]
|
||||||
|
|
||||||
|
default: // catch all
|
||||||
|
return nds[idx]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *node) isLeaf() bool {
|
||||||
|
return n.endpoints != nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *node) findPattern(pattern string) bool {
|
||||||
|
nn := n
|
||||||
|
for _, nds := range nn.children {
|
||||||
|
if len(nds) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
n = nn.findEdge(nds[0].typ, pattern[0])
|
||||||
|
if n == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var idx int
|
||||||
|
var xpattern string
|
||||||
|
|
||||||
|
switch n.typ {
|
||||||
|
case ntStatic:
|
||||||
|
idx = longestPrefix(pattern, n.prefix)
|
||||||
|
if idx < len(n.prefix) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
case ntParam, ntRegexp:
|
||||||
|
idx = strings.IndexByte(pattern, '}') + 1
|
||||||
|
|
||||||
|
case ntCatchAll:
|
||||||
|
idx = longestPrefix(pattern, "*")
|
||||||
|
|
||||||
|
default:
|
||||||
|
panic("chi: unknown node type")
|
||||||
|
}
|
||||||
|
|
||||||
|
xpattern = pattern[idx:]
|
||||||
|
if len(xpattern) == 0 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return n.findPattern(xpattern)
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *node) routes() []Route {
|
||||||
|
rts := []Route{}
|
||||||
|
|
||||||
|
n.walk(func(eps endpoints, subroutes Routes) bool {
|
||||||
|
if eps[mSTUB] != nil && eps[mSTUB].handler != nil && subroutes == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Group methodHandlers by unique patterns
|
||||||
|
pats := make(map[string]endpoints)
|
||||||
|
|
||||||
|
for mt, h := range eps {
|
||||||
|
if h.pattern == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
p, ok := pats[h.pattern]
|
||||||
|
if !ok {
|
||||||
|
p = endpoints{}
|
||||||
|
pats[h.pattern] = p
|
||||||
|
}
|
||||||
|
p[mt] = h
|
||||||
|
}
|
||||||
|
|
||||||
|
for p, mh := range pats {
|
||||||
|
hs := make(map[string]http.Handler)
|
||||||
|
if mh[mALL] != nil && mh[mALL].handler != nil {
|
||||||
|
hs["*"] = mh[mALL].handler
|
||||||
|
}
|
||||||
|
|
||||||
|
for mt, h := range mh {
|
||||||
|
if h.handler == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if m, ok := reverseMethodMap[mt]; ok {
|
||||||
|
hs[m] = h.handler
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
rt := Route{subroutes, hs, p}
|
||||||
|
rts = append(rts, rt)
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
})
|
||||||
|
|
||||||
|
return rts
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *node) walk(fn func(eps endpoints, subroutes Routes) bool) bool {
|
||||||
|
// Visit the leaf values if any
|
||||||
|
if (n.endpoints != nil || n.subroutes != nil) && fn(n.endpoints, n.subroutes) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Recurse on the children
|
||||||
|
for _, ns := range n.children {
|
||||||
|
for _, cn := range ns {
|
||||||
|
if cn.walk(fn) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// patNextSegment returns the next segment details from a pattern:
|
||||||
|
// node type, param key, regexp string, param tail byte, param starting index, param ending index
|
||||||
|
func patNextSegment(pattern string) (nodeTyp, string, string, byte, int, int) {
|
||||||
|
ps := strings.Index(pattern, "{")
|
||||||
|
ws := strings.Index(pattern, "*")
|
||||||
|
|
||||||
|
if ps < 0 && ws < 0 {
|
||||||
|
return ntStatic, "", "", 0, 0, len(pattern) // we return the entire thing
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sanity check
|
||||||
|
if ps >= 0 && ws >= 0 && ws < ps {
|
||||||
|
panic("chi: wildcard '*' must be the last pattern in a route, otherwise use a '{param}'")
|
||||||
|
}
|
||||||
|
|
||||||
|
var tail byte = '/' // Default endpoint tail to / byte
|
||||||
|
|
||||||
|
if ps >= 0 {
|
||||||
|
// Param/Regexp pattern is next
|
||||||
|
nt := ntParam
|
||||||
|
|
||||||
|
// Read to closing } taking into account opens and closes in curl count (cc)
|
||||||
|
cc := 0
|
||||||
|
pe := ps
|
||||||
|
for i, c := range pattern[ps:] {
|
||||||
|
if c == '{' {
|
||||||
|
cc++
|
||||||
|
} else if c == '}' {
|
||||||
|
cc--
|
||||||
|
if cc == 0 {
|
||||||
|
pe = ps + i
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if pe == ps {
|
||||||
|
panic("chi: route param closing delimiter '}' is missing")
|
||||||
|
}
|
||||||
|
|
||||||
|
key := pattern[ps+1 : pe]
|
||||||
|
pe++ // set end to next position
|
||||||
|
|
||||||
|
if pe < len(pattern) {
|
||||||
|
tail = pattern[pe]
|
||||||
|
}
|
||||||
|
|
||||||
|
key, rexpat, isRegexp := strings.Cut(key, ":")
|
||||||
|
if isRegexp {
|
||||||
|
nt = ntRegexp
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(rexpat) > 0 {
|
||||||
|
if rexpat[0] != '^' {
|
||||||
|
rexpat = "^" + rexpat
|
||||||
|
}
|
||||||
|
if rexpat[len(rexpat)-1] != '$' {
|
||||||
|
rexpat += "$"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nt, key, rexpat, tail, ps, pe
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wildcard pattern as finale
|
||||||
|
if ws < len(pattern)-1 {
|
||||||
|
panic("chi: wildcard '*' must be the last value in a route. trim trailing text or use a '{param}' instead")
|
||||||
|
}
|
||||||
|
return ntCatchAll, "*", "", 0, ws, len(pattern)
|
||||||
|
}
|
||||||
|
|
||||||
|
func patParamKeys(pattern string) []string {
|
||||||
|
pat := pattern
|
||||||
|
paramKeys := []string{}
|
||||||
|
for {
|
||||||
|
ptyp, paramKey, _, _, _, e := patNextSegment(pat)
|
||||||
|
if ptyp == ntStatic {
|
||||||
|
return paramKeys
|
||||||
|
}
|
||||||
|
for i := 0; i < len(paramKeys); i++ {
|
||||||
|
if paramKeys[i] == paramKey {
|
||||||
|
panic(fmt.Sprintf("chi: routing pattern '%s' contains duplicate param key, '%s'", pattern, paramKey))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
paramKeys = append(paramKeys, paramKey)
|
||||||
|
pat = pat[e:]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// longestPrefix finds the length of the shared prefix of two strings
|
||||||
|
func longestPrefix(k1, k2 string) (i int) {
|
||||||
|
for i = 0; i < min(len(k1), len(k2)); i++ {
|
||||||
|
if k1[i] != k2[i] {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
type nodes []*node
|
||||||
|
|
||||||
|
// Sort the list of nodes by label
|
||||||
|
func (ns nodes) Sort() { sort.Sort(ns); ns.tailSort() }
|
||||||
|
func (ns nodes) Len() int { return len(ns) }
|
||||||
|
func (ns nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] }
|
||||||
|
func (ns nodes) Less(i, j int) bool { return ns[i].label < ns[j].label }
|
||||||
|
|
||||||
|
// tailSort pushes nodes with '/' as the tail to the end of the list for param nodes.
|
||||||
|
// The list order determines the traversal order.
|
||||||
|
func (ns nodes) tailSort() {
|
||||||
|
for i := len(ns) - 1; i >= 0; i-- {
|
||||||
|
if ns[i].typ > ntStatic && ns[i].tail == '/' {
|
||||||
|
ns.Swap(i, len(ns)-1)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ns nodes) findEdge(label byte) *node {
|
||||||
|
num := len(ns)
|
||||||
|
idx := 0
|
||||||
|
i, j := 0, num-1
|
||||||
|
for i <= j {
|
||||||
|
idx = i + (j-i)/2
|
||||||
|
if label > ns[idx].label {
|
||||||
|
i = idx + 1
|
||||||
|
} else if label < ns[idx].label {
|
||||||
|
j = idx - 1
|
||||||
|
} else {
|
||||||
|
i = num // breaks cond
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ns[idx].label != label {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return ns[idx]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Route describes the details of a routing handler.
|
||||||
|
// Handlers map key is an HTTP method
|
||||||
|
type Route struct {
|
||||||
|
SubRoutes Routes
|
||||||
|
Handlers map[string]http.Handler
|
||||||
|
Pattern string
|
||||||
|
}
|
||||||
|
|
||||||
|
// WalkFunc is the type of the function called for each method and route visited by Walk.
|
||||||
|
type WalkFunc func(method string, route string, handler http.Handler, middlewares ...func(http.Handler) http.Handler) error
|
||||||
|
|
||||||
|
// Walk walks any router tree that implements Routes interface.
|
||||||
|
func Walk(r Routes, walkFn WalkFunc) error {
|
||||||
|
return walk(r, walkFn, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
func walk(r Routes, walkFn WalkFunc, parentRoute string, parentMw ...func(http.Handler) http.Handler) error {
|
||||||
|
for _, route := range r.Routes() {
|
||||||
|
mws := make([]func(http.Handler) http.Handler, len(parentMw))
|
||||||
|
copy(mws, parentMw)
|
||||||
|
mws = append(mws, r.Middlewares()...)
|
||||||
|
|
||||||
|
if route.SubRoutes != nil {
|
||||||
|
if err := walk(route.SubRoutes, walkFn, parentRoute+route.Pattern, mws...); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
for method, handler := range route.Handlers {
|
||||||
|
if method == "*" {
|
||||||
|
// Ignore a "catchAll" method, since we pass down all the specific methods for each route.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
fullRoute := parentRoute + route.Pattern
|
||||||
|
fullRoute = strings.Replace(fullRoute, "/*/", "/", -1)
|
||||||
|
|
||||||
|
if chain, ok := handler.(*ChainHandler); ok {
|
||||||
|
if err := walkFn(method, fullRoute, chain.Endpoint, append(mws, chain.Middlewares...)...); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if err := walkFn(method, fullRoute, handler, mws...); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
21
vendor/github.com/go-viper/mapstructure/v2/.editorconfig
generated
vendored
Normal file
21
vendor/github.com/go-viper/mapstructure/v2/.editorconfig
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
root = true
|
||||||
|
|
||||||
|
[*]
|
||||||
|
charset = utf-8
|
||||||
|
end_of_line = lf
|
||||||
|
indent_size = 4
|
||||||
|
indent_style = space
|
||||||
|
insert_final_newline = true
|
||||||
|
trim_trailing_whitespace = true
|
||||||
|
|
||||||
|
[*.go]
|
||||||
|
indent_style = tab
|
||||||
|
|
||||||
|
[{Makefile,*.mk}]
|
||||||
|
indent_style = tab
|
||||||
|
|
||||||
|
[*.nix]
|
||||||
|
indent_size = 2
|
||||||
|
|
||||||
|
[.golangci.yaml]
|
||||||
|
indent_size = 2
|
||||||
4
vendor/github.com/go-viper/mapstructure/v2/.envrc
generated
vendored
Normal file
4
vendor/github.com/go-viper/mapstructure/v2/.envrc
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then
|
||||||
|
source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4="
|
||||||
|
fi
|
||||||
|
use flake . --impure
|
||||||
6
vendor/github.com/go-viper/mapstructure/v2/.gitignore
generated
vendored
Normal file
6
vendor/github.com/go-viper/mapstructure/v2/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
/.devenv/
|
||||||
|
/.direnv/
|
||||||
|
/.pre-commit-config.yaml
|
||||||
|
/bin/
|
||||||
|
/build/
|
||||||
|
/var/
|
||||||
48
vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml
generated
vendored
Normal file
48
vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
version: "2"
|
||||||
|
|
||||||
|
run:
|
||||||
|
timeout: 10m
|
||||||
|
|
||||||
|
linters:
|
||||||
|
enable:
|
||||||
|
- govet
|
||||||
|
- ineffassign
|
||||||
|
# - misspell
|
||||||
|
- nolintlint
|
||||||
|
# - revive
|
||||||
|
|
||||||
|
disable:
|
||||||
|
- errcheck
|
||||||
|
- staticcheck
|
||||||
|
- unused
|
||||||
|
|
||||||
|
settings:
|
||||||
|
misspell:
|
||||||
|
locale: US
|
||||||
|
nolintlint:
|
||||||
|
allow-unused: false # report any unused nolint directives
|
||||||
|
require-specific: false # don't require nolint directives to be specific about which linter is being skipped
|
||||||
|
|
||||||
|
formatters:
|
||||||
|
enable:
|
||||||
|
- gci
|
||||||
|
- gofmt
|
||||||
|
- gofumpt
|
||||||
|
- goimports
|
||||||
|
# - golines
|
||||||
|
|
||||||
|
settings:
|
||||||
|
gci:
|
||||||
|
sections:
|
||||||
|
- standard
|
||||||
|
- default
|
||||||
|
- localmodule
|
||||||
|
gofmt:
|
||||||
|
simplify: true
|
||||||
|
rewrite-rules:
|
||||||
|
- pattern: interface{}
|
||||||
|
replacement: any
|
||||||
|
|
||||||
|
exclusions:
|
||||||
|
paths:
|
||||||
|
- internal/
|
||||||
104
vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md
generated
vendored
Normal file
104
vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md
generated
vendored
Normal file
@@ -0,0 +1,104 @@
|
|||||||
|
> [!WARNING]
|
||||||
|
> As of v2 of this library, change log can be found in GitHub releases.
|
||||||
|
|
||||||
|
## 1.5.1
|
||||||
|
|
||||||
|
* Wrap errors so they're compatible with `errors.Is` and `errors.As` [GH-282]
|
||||||
|
* Fix map of slices not decoding properly in certain cases. [GH-266]
|
||||||
|
|
||||||
|
## 1.5.0
|
||||||
|
|
||||||
|
* New option `IgnoreUntaggedFields` to ignore decoding to any fields
|
||||||
|
without `mapstructure` (or the configured tag name) set [GH-277]
|
||||||
|
* New option `ErrorUnset` which makes it an error if any fields
|
||||||
|
in a target struct are not set by the decoding process. [GH-225]
|
||||||
|
* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240]
|
||||||
|
* Decoding to slice from array no longer crashes [GH-265]
|
||||||
|
* Decode nested struct pointers to map [GH-271]
|
||||||
|
* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280]
|
||||||
|
* Fix issue where fields with `,omitempty` would sometimes decode
|
||||||
|
into a map with an empty string key [GH-281]
|
||||||
|
|
||||||
|
## 1.4.3
|
||||||
|
|
||||||
|
* Fix cases where `json.Number` didn't decode properly [GH-261]
|
||||||
|
|
||||||
|
## 1.4.2
|
||||||
|
|
||||||
|
* Custom name matchers to support any sort of casing, formatting, etc. for
|
||||||
|
field names. [GH-250]
|
||||||
|
* Fix possible panic in ComposeDecodeHookFunc [GH-251]
|
||||||
|
|
||||||
|
## 1.4.1
|
||||||
|
|
||||||
|
* Fix regression where `*time.Time` value would be set to empty and not be sent
|
||||||
|
to decode hooks properly [GH-232]
|
||||||
|
|
||||||
|
## 1.4.0
|
||||||
|
|
||||||
|
* A new decode hook type `DecodeHookFuncValue` has been added that has
|
||||||
|
access to the full values. [GH-183]
|
||||||
|
* Squash is now supported with embedded fields that are struct pointers [GH-205]
|
||||||
|
* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206]
|
||||||
|
|
||||||
|
## 1.3.3
|
||||||
|
|
||||||
|
* Decoding maps from maps creates a settable value for decode hooks [GH-203]
|
||||||
|
|
||||||
|
## 1.3.2
|
||||||
|
|
||||||
|
* Decode into interface type with a struct value is supported [GH-187]
|
||||||
|
|
||||||
|
## 1.3.1
|
||||||
|
|
||||||
|
* Squash should only squash embedded structs. [GH-194]
|
||||||
|
|
||||||
|
## 1.3.0
|
||||||
|
|
||||||
|
* Added `",omitempty"` support. This will ignore zero values in the source
|
||||||
|
structure when encoding. [GH-145]
|
||||||
|
|
||||||
|
## 1.2.3
|
||||||
|
|
||||||
|
* Fix duplicate entries in Keys list with pointer values. [GH-185]
|
||||||
|
|
||||||
|
## 1.2.2
|
||||||
|
|
||||||
|
* Do not add unsettable (unexported) values to the unused metadata key
|
||||||
|
or "remain" value. [GH-150]
|
||||||
|
|
||||||
|
## 1.2.1
|
||||||
|
|
||||||
|
* Go modules checksum mismatch fix
|
||||||
|
|
||||||
|
## 1.2.0
|
||||||
|
|
||||||
|
* Added support to capture unused values in a field using the `",remain"` value
|
||||||
|
in the mapstructure tag. There is an example to showcase usage.
|
||||||
|
* Added `DecoderConfig` option to always squash embedded structs
|
||||||
|
* `json.Number` can decode into `uint` types
|
||||||
|
* Empty slices are preserved and not replaced with nil slices
|
||||||
|
* Fix panic that can occur in when decoding a map into a nil slice of structs
|
||||||
|
* Improved package documentation for godoc
|
||||||
|
|
||||||
|
## 1.1.2
|
||||||
|
|
||||||
|
* Fix error when decode hook decodes interface implementation into interface
|
||||||
|
type. [GH-140]
|
||||||
|
|
||||||
|
## 1.1.1
|
||||||
|
|
||||||
|
* Fix panic that can happen in `decodePtr`
|
||||||
|
|
||||||
|
## 1.1.0
|
||||||
|
|
||||||
|
* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133]
|
||||||
|
* Support struct to struct decoding [GH-137]
|
||||||
|
* If source map value is nil, then destination map value is nil (instead of empty)
|
||||||
|
* If source slice value is nil, then destination slice value is nil (instead of empty)
|
||||||
|
* If source pointer is nil, then destination pointer is set to nil (instead of
|
||||||
|
allocated zero value of type)
|
||||||
|
|
||||||
|
## 1.0.0
|
||||||
|
|
||||||
|
* Initial tagged stable release.
|
||||||
21
vendor/github.com/go-viper/mapstructure/v2/LICENSE
generated
vendored
Normal file
21
vendor/github.com/go-viper/mapstructure/v2/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2013 Mitchell Hashimoto
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
||||||
81
vendor/github.com/go-viper/mapstructure/v2/README.md
generated
vendored
Normal file
81
vendor/github.com/go-viper/mapstructure/v2/README.md
generated
vendored
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
# mapstructure
|
||||||
|
|
||||||
|
[](https://github.com/go-viper/mapstructure/actions/workflows/ci.yaml)
|
||||||
|
[](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2)
|
||||||
|

|
||||||
|
[](https://deps.dev/go/github.com%252Fgo-viper%252Fmapstructure%252Fv2)
|
||||||
|
|
||||||
|
mapstructure is a Go library for decoding generic map values to structures
|
||||||
|
and vice versa, while providing helpful error handling.
|
||||||
|
|
||||||
|
This library is most useful when decoding values from some data stream (JSON,
|
||||||
|
Gob, etc.) where you don't _quite_ know the structure of the underlying data
|
||||||
|
until you read a part of it. You can therefore read a `map[string]interface{}`
|
||||||
|
and use this library to decode it into the proper underlying native Go
|
||||||
|
structure.
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
```shell
|
||||||
|
go get github.com/go-viper/mapstructure/v2
|
||||||
|
```
|
||||||
|
|
||||||
|
## Migrating from `github.com/mitchellh/mapstructure`
|
||||||
|
|
||||||
|
[@mitchehllh](https://github.com/mitchellh) announced his intent to archive some of his unmaintained projects (see [here](https://gist.github.com/mitchellh/90029601268e59a29e64e55bab1c5bdc) and [here](https://github.com/mitchellh/mapstructure/issues/349)). This is a repository achieved the "blessed fork" status.
|
||||||
|
|
||||||
|
You can migrate to this package by changing your import paths in your Go files to `github.com/go-viper/mapstructure/v2`.
|
||||||
|
The API is the same, so you don't need to change anything else.
|
||||||
|
|
||||||
|
Here is a script that can help you with the migration:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
sed -i 's|github.com/mitchellh/mapstructure|github.com/go-viper/mapstructure/v2|g' $(find . -type f -name '*.go')
|
||||||
|
```
|
||||||
|
|
||||||
|
If you need more time to migrate your code, that is absolutely fine.
|
||||||
|
|
||||||
|
Some of the latest fixes are backported to the v1 release branch of this package, so you can use the Go modules `replace` feature until you are ready to migrate:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
replace github.com/mitchellh/mapstructure => github.com/go-viper/mapstructure v1.6.0
|
||||||
|
```
|
||||||
|
|
||||||
|
## Usage & Example
|
||||||
|
|
||||||
|
For usage and examples see the [documentation](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2).
|
||||||
|
|
||||||
|
The `Decode` function has examples associated with it there.
|
||||||
|
|
||||||
|
## But Why?!
|
||||||
|
|
||||||
|
Go offers fantastic standard libraries for decoding formats such as JSON.
|
||||||
|
The standard method is to have a struct pre-created, and populate that struct
|
||||||
|
from the bytes of the encoded format. This is great, but the problem is if
|
||||||
|
you have configuration or an encoding that changes slightly depending on
|
||||||
|
specific fields. For example, consider this JSON:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"type": "person",
|
||||||
|
"name": "Mitchell"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Perhaps we can't populate a specific structure without first reading
|
||||||
|
the "type" field from the JSON. We could always do two passes over the
|
||||||
|
decoding of the JSON (reading the "type" first, and the rest later).
|
||||||
|
However, it is much simpler to just decode this into a `map[string]interface{}`
|
||||||
|
structure, read the "type" key, then use something like this library
|
||||||
|
to decode it into the proper structure.
|
||||||
|
|
||||||
|
## Credits
|
||||||
|
|
||||||
|
Mapstructure was originally created by [@mitchellh](https://github.com/mitchellh).
|
||||||
|
This is a maintained fork of the original library.
|
||||||
|
|
||||||
|
Read more about the reasons for the fork [here](https://github.com/mitchellh/mapstructure/issues/349).
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
The project is licensed under the [MIT License](LICENSE).
|
||||||
714
vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go
generated
vendored
Normal file
714
vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go
generated
vendored
Normal file
@@ -0,0 +1,714 @@
|
|||||||
|
package mapstructure
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"net/netip"
|
||||||
|
"net/url"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// typedDecodeHook takes a raw DecodeHookFunc (an any) and turns
|
||||||
|
// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
|
||||||
|
func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
|
||||||
|
// Create variables here so we can reference them with the reflect pkg
|
||||||
|
var f1 DecodeHookFuncType
|
||||||
|
var f2 DecodeHookFuncKind
|
||||||
|
var f3 DecodeHookFuncValue
|
||||||
|
|
||||||
|
// Fill in the variables into this interface and the rest is done
|
||||||
|
// automatically using the reflect package.
|
||||||
|
potential := []any{f1, f2, f3}
|
||||||
|
|
||||||
|
v := reflect.ValueOf(h)
|
||||||
|
vt := v.Type()
|
||||||
|
for _, raw := range potential {
|
||||||
|
pt := reflect.ValueOf(raw).Type()
|
||||||
|
if vt.ConvertibleTo(pt) {
|
||||||
|
return v.Convert(pt).Interface()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// cachedDecodeHook takes a raw DecodeHookFunc (an any) and turns
|
||||||
|
// it into a closure to be used directly
|
||||||
|
// if the type fails to convert we return a closure always erroring to keep the previous behaviour
|
||||||
|
func cachedDecodeHook(raw DecodeHookFunc) func(from reflect.Value, to reflect.Value) (any, error) {
|
||||||
|
switch f := typedDecodeHook(raw).(type) {
|
||||||
|
case DecodeHookFuncType:
|
||||||
|
return func(from reflect.Value, to reflect.Value) (any, error) {
|
||||||
|
return f(from.Type(), to.Type(), from.Interface())
|
||||||
|
}
|
||||||
|
case DecodeHookFuncKind:
|
||||||
|
return func(from reflect.Value, to reflect.Value) (any, error) {
|
||||||
|
return f(from.Kind(), to.Kind(), from.Interface())
|
||||||
|
}
|
||||||
|
case DecodeHookFuncValue:
|
||||||
|
return func(from reflect.Value, to reflect.Value) (any, error) {
|
||||||
|
return f(from, to)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return func(from reflect.Value, to reflect.Value) (any, error) {
|
||||||
|
return nil, errors.New("invalid decode hook signature")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeHookExec executes the given decode hook. This should be used
|
||||||
|
// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
|
||||||
|
// that took reflect.Kind instead of reflect.Type.
|
||||||
|
func DecodeHookExec(
|
||||||
|
raw DecodeHookFunc,
|
||||||
|
from reflect.Value, to reflect.Value,
|
||||||
|
) (any, error) {
|
||||||
|
switch f := typedDecodeHook(raw).(type) {
|
||||||
|
case DecodeHookFuncType:
|
||||||
|
return f(from.Type(), to.Type(), from.Interface())
|
||||||
|
case DecodeHookFuncKind:
|
||||||
|
return f(from.Kind(), to.Kind(), from.Interface())
|
||||||
|
case DecodeHookFuncValue:
|
||||||
|
return f(from, to)
|
||||||
|
default:
|
||||||
|
return nil, errors.New("invalid decode hook signature")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ComposeDecodeHookFunc creates a single DecodeHookFunc that
|
||||||
|
// automatically composes multiple DecodeHookFuncs.
|
||||||
|
//
|
||||||
|
// The composed funcs are called in order, with the result of the
|
||||||
|
// previous transformation.
|
||||||
|
func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
|
||||||
|
cached := make([]func(from reflect.Value, to reflect.Value) (any, error), 0, len(fs))
|
||||||
|
for _, f := range fs {
|
||||||
|
cached = append(cached, cachedDecodeHook(f))
|
||||||
|
}
|
||||||
|
return func(f reflect.Value, t reflect.Value) (any, error) {
|
||||||
|
var err error
|
||||||
|
data := f.Interface()
|
||||||
|
|
||||||
|
newFrom := f
|
||||||
|
for _, c := range cached {
|
||||||
|
data, err = c(newFrom, t)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if v, ok := data.(reflect.Value); ok {
|
||||||
|
newFrom = v
|
||||||
|
} else {
|
||||||
|
newFrom = reflect.ValueOf(data)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned.
|
||||||
|
// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages.
|
||||||
|
func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc {
|
||||||
|
cached := make([]func(from reflect.Value, to reflect.Value) (any, error), 0, len(ff))
|
||||||
|
for _, f := range ff {
|
||||||
|
cached = append(cached, cachedDecodeHook(f))
|
||||||
|
}
|
||||||
|
return func(a, b reflect.Value) (any, error) {
|
||||||
|
var allErrs string
|
||||||
|
var out any
|
||||||
|
var err error
|
||||||
|
|
||||||
|
for _, c := range cached {
|
||||||
|
out, err = c(a, b)
|
||||||
|
if err != nil {
|
||||||
|
allErrs += err.Error() + "\n"
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, errors.New(allErrs)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToSliceHookFunc returns a DecodeHookFunc that converts
|
||||||
|
// string to []string by splitting on the given sep.
|
||||||
|
func StringToSliceHookFunc(sep string) DecodeHookFunc {
|
||||||
|
return func(
|
||||||
|
f reflect.Type,
|
||||||
|
t reflect.Type,
|
||||||
|
data any,
|
||||||
|
) (any, error) {
|
||||||
|
if f.Kind() != reflect.String {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
if t != reflect.SliceOf(f) {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
raw := data.(string)
|
||||||
|
if raw == "" {
|
||||||
|
return []string{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.Split(raw, sep), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToWeakSliceHookFunc brings back the old (pre-v2) behavior of [StringToSliceHookFunc].
|
||||||
|
//
|
||||||
|
// As of mapstructure v2.0.0 [StringToSliceHookFunc] checks if the return type is a string slice.
|
||||||
|
// This function removes that check.
|
||||||
|
func StringToWeakSliceHookFunc(sep string) DecodeHookFunc {
|
||||||
|
return func(
|
||||||
|
f reflect.Type,
|
||||||
|
t reflect.Type,
|
||||||
|
data any,
|
||||||
|
) (any, error) {
|
||||||
|
if f.Kind() != reflect.String || t.Kind() != reflect.Slice {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
raw := data.(string)
|
||||||
|
if raw == "" {
|
||||||
|
return []string{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.Split(raw, sep), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to time.Duration.
|
||||||
|
func StringToTimeDurationHookFunc() DecodeHookFunc {
|
||||||
|
return func(
|
||||||
|
f reflect.Type,
|
||||||
|
t reflect.Type,
|
||||||
|
data any,
|
||||||
|
) (any, error) {
|
||||||
|
if f.Kind() != reflect.String {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
if t != reflect.TypeOf(time.Duration(5)) {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert it by parsing
|
||||||
|
d, err := time.ParseDuration(data.(string))
|
||||||
|
|
||||||
|
return d, wrapTimeParseDurationError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToTimeLocationHookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to *time.Location.
|
||||||
|
func StringToTimeLocationHookFunc() DecodeHookFunc {
|
||||||
|
return func(
|
||||||
|
f reflect.Type,
|
||||||
|
t reflect.Type,
|
||||||
|
data any,
|
||||||
|
) (any, error) {
|
||||||
|
if f.Kind() != reflect.String {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
if t != reflect.TypeOf(time.Local) {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
d, err := time.LoadLocation(data.(string))
|
||||||
|
|
||||||
|
return d, wrapTimeParseLocationError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToURLHookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to *url.URL.
|
||||||
|
func StringToURLHookFunc() DecodeHookFunc {
|
||||||
|
return func(
|
||||||
|
f reflect.Type,
|
||||||
|
t reflect.Type,
|
||||||
|
data any,
|
||||||
|
) (any, error) {
|
||||||
|
if f.Kind() != reflect.String {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
if t != reflect.TypeOf(&url.URL{}) {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert it by parsing
|
||||||
|
u, err := url.Parse(data.(string))
|
||||||
|
|
||||||
|
return u, wrapUrlError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToIPHookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to net.IP
|
||||||
|
func StringToIPHookFunc() DecodeHookFunc {
|
||||||
|
return func(
|
||||||
|
f reflect.Type,
|
||||||
|
t reflect.Type,
|
||||||
|
data any,
|
||||||
|
) (any, error) {
|
||||||
|
if f.Kind() != reflect.String {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
if t != reflect.TypeOf(net.IP{}) {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert it by parsing
|
||||||
|
ip := net.ParseIP(data.(string))
|
||||||
|
if ip == nil {
|
||||||
|
return net.IP{}, fmt.Errorf("failed parsing ip")
|
||||||
|
}
|
||||||
|
|
||||||
|
return ip, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToIPNetHookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to net.IPNet
|
||||||
|
func StringToIPNetHookFunc() DecodeHookFunc {
|
||||||
|
return func(
|
||||||
|
f reflect.Type,
|
||||||
|
t reflect.Type,
|
||||||
|
data any,
|
||||||
|
) (any, error) {
|
||||||
|
if f.Kind() != reflect.String {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
if t != reflect.TypeOf(net.IPNet{}) {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert it by parsing
|
||||||
|
_, net, err := net.ParseCIDR(data.(string))
|
||||||
|
return net, wrapNetParseError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToTimeHookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to time.Time.
|
||||||
|
func StringToTimeHookFunc(layout string) DecodeHookFunc {
|
||||||
|
return func(
|
||||||
|
f reflect.Type,
|
||||||
|
t reflect.Type,
|
||||||
|
data any,
|
||||||
|
) (any, error) {
|
||||||
|
if f.Kind() != reflect.String {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
if t != reflect.TypeOf(time.Time{}) {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert it by parsing
|
||||||
|
ti, err := time.Parse(layout, data.(string))
|
||||||
|
|
||||||
|
return ti, wrapTimeParseError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
|
||||||
|
// the decoder.
|
||||||
|
//
|
||||||
|
// Note that this is significantly different from the WeaklyTypedInput option
|
||||||
|
// of the DecoderConfig.
|
||||||
|
func WeaklyTypedHook(
|
||||||
|
f reflect.Kind,
|
||||||
|
t reflect.Kind,
|
||||||
|
data any,
|
||||||
|
) (any, error) {
|
||||||
|
dataVal := reflect.ValueOf(data)
|
||||||
|
switch t {
|
||||||
|
case reflect.String:
|
||||||
|
switch f {
|
||||||
|
case reflect.Bool:
|
||||||
|
if dataVal.Bool() {
|
||||||
|
return "1", nil
|
||||||
|
}
|
||||||
|
return "0", nil
|
||||||
|
case reflect.Float32:
|
||||||
|
return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
|
||||||
|
case reflect.Int:
|
||||||
|
return strconv.FormatInt(dataVal.Int(), 10), nil
|
||||||
|
case reflect.Slice:
|
||||||
|
dataType := dataVal.Type()
|
||||||
|
elemKind := dataType.Elem().Kind()
|
||||||
|
if elemKind == reflect.Uint8 {
|
||||||
|
return string(dataVal.Interface().([]uint8)), nil
|
||||||
|
}
|
||||||
|
case reflect.Uint:
|
||||||
|
return strconv.FormatUint(dataVal.Uint(), 10), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func RecursiveStructToMapHookFunc() DecodeHookFunc {
|
||||||
|
return func(f reflect.Value, t reflect.Value) (any, error) {
|
||||||
|
if f.Kind() != reflect.Struct {
|
||||||
|
return f.Interface(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var i any = struct{}{}
|
||||||
|
if t.Type() != reflect.TypeOf(&i).Elem() {
|
||||||
|
return f.Interface(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
m := make(map[string]any)
|
||||||
|
t.Set(reflect.ValueOf(m))
|
||||||
|
|
||||||
|
return f.Interface(), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies
|
||||||
|
// strings to the UnmarshalText function, when the target type
|
||||||
|
// implements the encoding.TextUnmarshaler interface
|
||||||
|
func TextUnmarshallerHookFunc() DecodeHookFuncType {
|
||||||
|
return func(
|
||||||
|
f reflect.Type,
|
||||||
|
t reflect.Type,
|
||||||
|
data any,
|
||||||
|
) (any, error) {
|
||||||
|
if f.Kind() != reflect.String {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
result := reflect.New(t).Interface()
|
||||||
|
unmarshaller, ok := result.(encoding.TextUnmarshaler)
|
||||||
|
if !ok {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
str, ok := data.(string)
|
||||||
|
if !ok {
|
||||||
|
str = reflect.Indirect(reflect.ValueOf(&data)).Elem().String()
|
||||||
|
}
|
||||||
|
if err := unmarshaller.UnmarshalText([]byte(str)); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToNetIPAddrHookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to netip.Addr.
|
||||||
|
func StringToNetIPAddrHookFunc() DecodeHookFunc {
|
||||||
|
return func(
|
||||||
|
f reflect.Type,
|
||||||
|
t reflect.Type,
|
||||||
|
data any,
|
||||||
|
) (any, error) {
|
||||||
|
if f.Kind() != reflect.String {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
if t != reflect.TypeOf(netip.Addr{}) {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert it by parsing
|
||||||
|
addr, err := netip.ParseAddr(data.(string))
|
||||||
|
|
||||||
|
return addr, wrapNetIPParseAddrError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToNetIPAddrPortHookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to netip.AddrPort.
|
||||||
|
func StringToNetIPAddrPortHookFunc() DecodeHookFunc {
|
||||||
|
return func(
|
||||||
|
f reflect.Type,
|
||||||
|
t reflect.Type,
|
||||||
|
data any,
|
||||||
|
) (any, error) {
|
||||||
|
if f.Kind() != reflect.String {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
if t != reflect.TypeOf(netip.AddrPort{}) {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert it by parsing
|
||||||
|
addrPort, err := netip.ParseAddrPort(data.(string))
|
||||||
|
|
||||||
|
return addrPort, wrapNetIPParseAddrPortError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToNetIPPrefixHookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to netip.Prefix.
|
||||||
|
func StringToNetIPPrefixHookFunc() DecodeHookFunc {
|
||||||
|
return func(
|
||||||
|
f reflect.Type,
|
||||||
|
t reflect.Type,
|
||||||
|
data any,
|
||||||
|
) (any, error) {
|
||||||
|
if f.Kind() != reflect.String {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
if t != reflect.TypeOf(netip.Prefix{}) {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert it by parsing
|
||||||
|
prefix, err := netip.ParsePrefix(data.(string))
|
||||||
|
|
||||||
|
return prefix, wrapNetIPParsePrefixError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToBasicTypeHookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to basic types.
|
||||||
|
// int8, uint8, int16, uint16, int32, uint32, int64, uint64, int, uint, float32, float64, bool, byte, rune, complex64, complex128
|
||||||
|
func StringToBasicTypeHookFunc() DecodeHookFunc {
|
||||||
|
return ComposeDecodeHookFunc(
|
||||||
|
StringToInt8HookFunc(),
|
||||||
|
StringToUint8HookFunc(),
|
||||||
|
StringToInt16HookFunc(),
|
||||||
|
StringToUint16HookFunc(),
|
||||||
|
StringToInt32HookFunc(),
|
||||||
|
StringToUint32HookFunc(),
|
||||||
|
StringToInt64HookFunc(),
|
||||||
|
StringToUint64HookFunc(),
|
||||||
|
StringToIntHookFunc(),
|
||||||
|
StringToUintHookFunc(),
|
||||||
|
StringToFloat32HookFunc(),
|
||||||
|
StringToFloat64HookFunc(),
|
||||||
|
StringToBoolHookFunc(),
|
||||||
|
// byte and rune are aliases for uint8 and int32 respectively
|
||||||
|
// StringToByteHookFunc(),
|
||||||
|
// StringToRuneHookFunc(),
|
||||||
|
StringToComplex64HookFunc(),
|
||||||
|
StringToComplex128HookFunc(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToInt8HookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to int8.
|
||||||
|
func StringToInt8HookFunc() DecodeHookFunc {
|
||||||
|
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||||
|
if f.Kind() != reflect.String || t.Kind() != reflect.Int8 {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert it by parsing
|
||||||
|
i64, err := strconv.ParseInt(data.(string), 0, 8)
|
||||||
|
return int8(i64), wrapStrconvNumError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToUint8HookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to uint8.
|
||||||
|
func StringToUint8HookFunc() DecodeHookFunc {
|
||||||
|
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||||
|
if f.Kind() != reflect.String || t.Kind() != reflect.Uint8 {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert it by parsing
|
||||||
|
u64, err := strconv.ParseUint(data.(string), 0, 8)
|
||||||
|
return uint8(u64), wrapStrconvNumError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToInt16HookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to int16.
|
||||||
|
func StringToInt16HookFunc() DecodeHookFunc {
|
||||||
|
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||||
|
if f.Kind() != reflect.String || t.Kind() != reflect.Int16 {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert it by parsing
|
||||||
|
i64, err := strconv.ParseInt(data.(string), 0, 16)
|
||||||
|
return int16(i64), wrapStrconvNumError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToUint16HookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to uint16.
|
||||||
|
func StringToUint16HookFunc() DecodeHookFunc {
|
||||||
|
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||||
|
if f.Kind() != reflect.String || t.Kind() != reflect.Uint16 {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert it by parsing
|
||||||
|
u64, err := strconv.ParseUint(data.(string), 0, 16)
|
||||||
|
return uint16(u64), wrapStrconvNumError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToInt32HookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to int32.
|
||||||
|
func StringToInt32HookFunc() DecodeHookFunc {
|
||||||
|
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||||
|
if f.Kind() != reflect.String || t.Kind() != reflect.Int32 {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert it by parsing
|
||||||
|
i64, err := strconv.ParseInt(data.(string), 0, 32)
|
||||||
|
return int32(i64), wrapStrconvNumError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToUint32HookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to uint32.
|
||||||
|
func StringToUint32HookFunc() DecodeHookFunc {
|
||||||
|
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||||
|
if f.Kind() != reflect.String || t.Kind() != reflect.Uint32 {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert it by parsing
|
||||||
|
u64, err := strconv.ParseUint(data.(string), 0, 32)
|
||||||
|
return uint32(u64), wrapStrconvNumError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToInt64HookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to int64.
|
||||||
|
func StringToInt64HookFunc() DecodeHookFunc {
|
||||||
|
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||||
|
if f.Kind() != reflect.String || t.Kind() != reflect.Int64 {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert it by parsing
|
||||||
|
i64, err := strconv.ParseInt(data.(string), 0, 64)
|
||||||
|
return int64(i64), wrapStrconvNumError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToUint64HookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to uint64.
|
||||||
|
func StringToUint64HookFunc() DecodeHookFunc {
|
||||||
|
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||||
|
if f.Kind() != reflect.String || t.Kind() != reflect.Uint64 {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert it by parsing
|
||||||
|
u64, err := strconv.ParseUint(data.(string), 0, 64)
|
||||||
|
return uint64(u64), wrapStrconvNumError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToIntHookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to int.
|
||||||
|
func StringToIntHookFunc() DecodeHookFunc {
|
||||||
|
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||||
|
if f.Kind() != reflect.String || t.Kind() != reflect.Int {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert it by parsing
|
||||||
|
i64, err := strconv.ParseInt(data.(string), 0, 0)
|
||||||
|
return int(i64), wrapStrconvNumError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToUintHookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to uint.
|
||||||
|
func StringToUintHookFunc() DecodeHookFunc {
|
||||||
|
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||||
|
if f.Kind() != reflect.String || t.Kind() != reflect.Uint {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert it by parsing
|
||||||
|
u64, err := strconv.ParseUint(data.(string), 0, 0)
|
||||||
|
return uint(u64), wrapStrconvNumError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToFloat32HookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to float32.
|
||||||
|
func StringToFloat32HookFunc() DecodeHookFunc {
|
||||||
|
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||||
|
if f.Kind() != reflect.String || t.Kind() != reflect.Float32 {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert it by parsing
|
||||||
|
f64, err := strconv.ParseFloat(data.(string), 32)
|
||||||
|
return float32(f64), wrapStrconvNumError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToFloat64HookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to float64.
|
||||||
|
func StringToFloat64HookFunc() DecodeHookFunc {
|
||||||
|
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||||
|
if f.Kind() != reflect.String || t.Kind() != reflect.Float64 {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert it by parsing
|
||||||
|
f64, err := strconv.ParseFloat(data.(string), 64)
|
||||||
|
return f64, wrapStrconvNumError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToBoolHookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to bool.
|
||||||
|
func StringToBoolHookFunc() DecodeHookFunc {
|
||||||
|
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||||
|
if f.Kind() != reflect.String || t.Kind() != reflect.Bool {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert it by parsing
|
||||||
|
b, err := strconv.ParseBool(data.(string))
|
||||||
|
return b, wrapStrconvNumError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToByteHookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to byte.
|
||||||
|
func StringToByteHookFunc() DecodeHookFunc {
|
||||||
|
return StringToUint8HookFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToRuneHookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to rune.
|
||||||
|
func StringToRuneHookFunc() DecodeHookFunc {
|
||||||
|
return StringToInt32HookFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToComplex64HookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to complex64.
|
||||||
|
func StringToComplex64HookFunc() DecodeHookFunc {
|
||||||
|
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||||
|
if f.Kind() != reflect.String || t.Kind() != reflect.Complex64 {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert it by parsing
|
||||||
|
c128, err := strconv.ParseComplex(data.(string), 64)
|
||||||
|
return complex64(c128), wrapStrconvNumError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringToComplex128HookFunc returns a DecodeHookFunc that converts
|
||||||
|
// strings to complex128.
|
||||||
|
func StringToComplex128HookFunc() DecodeHookFunc {
|
||||||
|
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||||
|
if f.Kind() != reflect.String || t.Kind() != reflect.Complex128 {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert it by parsing
|
||||||
|
c128, err := strconv.ParseComplex(data.(string), 128)
|
||||||
|
return c128, wrapStrconvNumError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
244
vendor/github.com/go-viper/mapstructure/v2/errors.go
generated
vendored
Normal file
244
vendor/github.com/go-viper/mapstructure/v2/errors.go
generated
vendored
Normal file
@@ -0,0 +1,244 @@
|
|||||||
|
package mapstructure
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"net/url"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Error interface is implemented by all errors emitted by mapstructure.
|
||||||
|
//
|
||||||
|
// Use [errors.As] to check if an error implements this interface.
|
||||||
|
type Error interface {
|
||||||
|
error
|
||||||
|
|
||||||
|
mapstructure()
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeError is a generic error type that holds information about
|
||||||
|
// a decoding error together with the name of the field that caused the error.
|
||||||
|
type DecodeError struct {
|
||||||
|
name string
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func newDecodeError(name string, err error) *DecodeError {
|
||||||
|
return &DecodeError{
|
||||||
|
name: name,
|
||||||
|
err: err,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *DecodeError) Name() string {
|
||||||
|
return e.name
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *DecodeError) Unwrap() error {
|
||||||
|
return e.err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *DecodeError) Error() string {
|
||||||
|
return fmt.Sprintf("'%s' %s", e.name, e.err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*DecodeError) mapstructure() {}
|
||||||
|
|
||||||
|
// ParseError is an error type that indicates a value could not be parsed
|
||||||
|
// into the expected type.
|
||||||
|
type ParseError struct {
|
||||||
|
Expected reflect.Value
|
||||||
|
Value any
|
||||||
|
Err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ParseError) Error() string {
|
||||||
|
return fmt.Sprintf("cannot parse value as '%s': %s", e.Expected.Type(), e.Err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*ParseError) mapstructure() {}
|
||||||
|
|
||||||
|
// UnconvertibleTypeError is an error type that indicates a value could not be
|
||||||
|
// converted to the expected type.
|
||||||
|
type UnconvertibleTypeError struct {
|
||||||
|
Expected reflect.Value
|
||||||
|
Value any
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *UnconvertibleTypeError) Error() string {
|
||||||
|
return fmt.Sprintf(
|
||||||
|
"expected type '%s', got unconvertible type '%s'",
|
||||||
|
e.Expected.Type(),
|
||||||
|
reflect.TypeOf(e.Value),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*UnconvertibleTypeError) mapstructure() {}
|
||||||
|
|
||||||
|
func wrapStrconvNumError(err error) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if err, ok := err.(*strconv.NumError); ok {
|
||||||
|
return &strconvNumError{Err: err}
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
type strconvNumError struct {
|
||||||
|
Err *strconv.NumError
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *strconvNumError) Error() string {
|
||||||
|
return "strconv." + e.Err.Func + ": " + e.Err.Err.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *strconvNumError) Unwrap() error { return e.Err }
|
||||||
|
|
||||||
|
func wrapUrlError(err error) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if err, ok := err.(*url.Error); ok {
|
||||||
|
return &urlError{Err: err}
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
type urlError struct {
|
||||||
|
Err *url.Error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *urlError) Error() string {
|
||||||
|
return fmt.Sprintf("%s", e.Err.Err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *urlError) Unwrap() error { return e.Err }
|
||||||
|
|
||||||
|
func wrapNetParseError(err error) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if err, ok := err.(*net.ParseError); ok {
|
||||||
|
return &netParseError{Err: err}
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
type netParseError struct {
|
||||||
|
Err *net.ParseError
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *netParseError) Error() string {
|
||||||
|
return "invalid " + e.Err.Type
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *netParseError) Unwrap() error { return e.Err }
|
||||||
|
|
||||||
|
func wrapTimeParseError(err error) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if err, ok := err.(*time.ParseError); ok {
|
||||||
|
return &timeParseError{Err: err}
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
type timeParseError struct {
|
||||||
|
Err *time.ParseError
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *timeParseError) Error() string {
|
||||||
|
if e.Err.Message == "" {
|
||||||
|
return fmt.Sprintf("parsing time as %q: cannot parse as %q", e.Err.Layout, e.Err.LayoutElem)
|
||||||
|
}
|
||||||
|
|
||||||
|
return "parsing time " + e.Err.Message
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *timeParseError) Unwrap() error { return e.Err }
|
||||||
|
|
||||||
|
func wrapNetIPParseAddrError(err error) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if errMsg := err.Error(); strings.HasPrefix(errMsg, "ParseAddr") {
|
||||||
|
errPieces := strings.Split(errMsg, ": ")
|
||||||
|
|
||||||
|
return fmt.Errorf("ParseAddr: %s", errPieces[len(errPieces)-1])
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func wrapNetIPParseAddrPortError(err error) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
errMsg := err.Error()
|
||||||
|
if strings.HasPrefix(errMsg, "invalid port ") {
|
||||||
|
return errors.New("invalid port")
|
||||||
|
} else if strings.HasPrefix(errMsg, "invalid ip:port ") {
|
||||||
|
return errors.New("invalid ip:port")
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func wrapNetIPParsePrefixError(err error) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if errMsg := err.Error(); strings.HasPrefix(errMsg, "netip.ParsePrefix") {
|
||||||
|
errPieces := strings.Split(errMsg, ": ")
|
||||||
|
|
||||||
|
return fmt.Errorf("netip.ParsePrefix: %s", errPieces[len(errPieces)-1])
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func wrapTimeParseDurationError(err error) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
errMsg := err.Error()
|
||||||
|
if strings.HasPrefix(errMsg, "time: unknown unit ") {
|
||||||
|
return errors.New("time: unknown unit")
|
||||||
|
} else if strings.HasPrefix(errMsg, "time: ") {
|
||||||
|
idx := strings.LastIndex(errMsg, " ")
|
||||||
|
|
||||||
|
return errors.New(errMsg[:idx])
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func wrapTimeParseLocationError(err error) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
errMsg := err.Error()
|
||||||
|
if strings.Contains(errMsg, "unknown time zone") || strings.HasPrefix(errMsg, "time: unknown format") {
|
||||||
|
return fmt.Errorf("invalid time zone format: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
294
vendor/github.com/go-viper/mapstructure/v2/flake.lock
generated
vendored
Normal file
294
vendor/github.com/go-viper/mapstructure/v2/flake.lock
generated
vendored
Normal file
@@ -0,0 +1,294 @@
|
|||||||
|
{
|
||||||
|
"nodes": {
|
||||||
|
"cachix": {
|
||||||
|
"inputs": {
|
||||||
|
"devenv": [
|
||||||
|
"devenv"
|
||||||
|
],
|
||||||
|
"flake-compat": [
|
||||||
|
"devenv"
|
||||||
|
],
|
||||||
|
"git-hooks": [
|
||||||
|
"devenv"
|
||||||
|
],
|
||||||
|
"nixpkgs": "nixpkgs"
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1742042642,
|
||||||
|
"narHash": "sha256-D0gP8srrX0qj+wNYNPdtVJsQuFzIng3q43thnHXQ/es=",
|
||||||
|
"owner": "cachix",
|
||||||
|
"repo": "cachix",
|
||||||
|
"rev": "a624d3eaf4b1d225f918de8543ed739f2f574203",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "cachix",
|
||||||
|
"ref": "latest",
|
||||||
|
"repo": "cachix",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"devenv": {
|
||||||
|
"inputs": {
|
||||||
|
"cachix": "cachix",
|
||||||
|
"flake-compat": "flake-compat",
|
||||||
|
"git-hooks": "git-hooks",
|
||||||
|
"nix": "nix",
|
||||||
|
"nixpkgs": "nixpkgs_3"
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1744876578,
|
||||||
|
"narHash": "sha256-8MTBj2REB8t29sIBLpxbR0+AEGJ7f+RkzZPAGsFd40c=",
|
||||||
|
"owner": "cachix",
|
||||||
|
"repo": "devenv",
|
||||||
|
"rev": "7ff7c351bba20d0615be25ecdcbcf79b57b85fe1",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "cachix",
|
||||||
|
"repo": "devenv",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"flake-compat": {
|
||||||
|
"flake": false,
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1733328505,
|
||||||
|
"narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
|
||||||
|
"owner": "edolstra",
|
||||||
|
"repo": "flake-compat",
|
||||||
|
"rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "edolstra",
|
||||||
|
"repo": "flake-compat",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"flake-parts": {
|
||||||
|
"inputs": {
|
||||||
|
"nixpkgs-lib": [
|
||||||
|
"devenv",
|
||||||
|
"nix",
|
||||||
|
"nixpkgs"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1712014858,
|
||||||
|
"narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=",
|
||||||
|
"owner": "hercules-ci",
|
||||||
|
"repo": "flake-parts",
|
||||||
|
"rev": "9126214d0a59633752a136528f5f3b9aa8565b7d",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "hercules-ci",
|
||||||
|
"repo": "flake-parts",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"flake-parts_2": {
|
||||||
|
"inputs": {
|
||||||
|
"nixpkgs-lib": "nixpkgs-lib"
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1743550720,
|
||||||
|
"narHash": "sha256-hIshGgKZCgWh6AYJpJmRgFdR3WUbkY04o82X05xqQiY=",
|
||||||
|
"owner": "hercules-ci",
|
||||||
|
"repo": "flake-parts",
|
||||||
|
"rev": "c621e8422220273271f52058f618c94e405bb0f5",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "hercules-ci",
|
||||||
|
"repo": "flake-parts",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"git-hooks": {
|
||||||
|
"inputs": {
|
||||||
|
"flake-compat": [
|
||||||
|
"devenv"
|
||||||
|
],
|
||||||
|
"gitignore": "gitignore",
|
||||||
|
"nixpkgs": [
|
||||||
|
"devenv",
|
||||||
|
"nixpkgs"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1742649964,
|
||||||
|
"narHash": "sha256-DwOTp7nvfi8mRfuL1escHDXabVXFGT1VlPD1JHrtrco=",
|
||||||
|
"owner": "cachix",
|
||||||
|
"repo": "git-hooks.nix",
|
||||||
|
"rev": "dcf5072734cb576d2b0c59b2ac44f5050b5eac82",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "cachix",
|
||||||
|
"repo": "git-hooks.nix",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"gitignore": {
|
||||||
|
"inputs": {
|
||||||
|
"nixpkgs": [
|
||||||
|
"devenv",
|
||||||
|
"git-hooks",
|
||||||
|
"nixpkgs"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1709087332,
|
||||||
|
"narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
|
||||||
|
"owner": "hercules-ci",
|
||||||
|
"repo": "gitignore.nix",
|
||||||
|
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "hercules-ci",
|
||||||
|
"repo": "gitignore.nix",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"libgit2": {
|
||||||
|
"flake": false,
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1697646580,
|
||||||
|
"narHash": "sha256-oX4Z3S9WtJlwvj0uH9HlYcWv+x1hqp8mhXl7HsLu2f0=",
|
||||||
|
"owner": "libgit2",
|
||||||
|
"repo": "libgit2",
|
||||||
|
"rev": "45fd9ed7ae1a9b74b957ef4f337bc3c8b3df01b5",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "libgit2",
|
||||||
|
"repo": "libgit2",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nix": {
|
||||||
|
"inputs": {
|
||||||
|
"flake-compat": [
|
||||||
|
"devenv"
|
||||||
|
],
|
||||||
|
"flake-parts": "flake-parts",
|
||||||
|
"libgit2": "libgit2",
|
||||||
|
"nixpkgs": "nixpkgs_2",
|
||||||
|
"nixpkgs-23-11": [
|
||||||
|
"devenv"
|
||||||
|
],
|
||||||
|
"nixpkgs-regression": [
|
||||||
|
"devenv"
|
||||||
|
],
|
||||||
|
"pre-commit-hooks": [
|
||||||
|
"devenv"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1741798497,
|
||||||
|
"narHash": "sha256-E3j+3MoY8Y96mG1dUIiLFm2tZmNbRvSiyN7CrSKuAVg=",
|
||||||
|
"owner": "domenkozar",
|
||||||
|
"repo": "nix",
|
||||||
|
"rev": "f3f44b2baaf6c4c6e179de8cbb1cc6db031083cd",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "domenkozar",
|
||||||
|
"ref": "devenv-2.24",
|
||||||
|
"repo": "nix",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nixpkgs": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1733212471,
|
||||||
|
"narHash": "sha256-M1+uCoV5igihRfcUKrr1riygbe73/dzNnzPsmaLCmpo=",
|
||||||
|
"owner": "NixOS",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"rev": "55d15ad12a74eb7d4646254e13638ad0c4128776",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "NixOS",
|
||||||
|
"ref": "nixos-unstable",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nixpkgs-lib": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1743296961,
|
||||||
|
"narHash": "sha256-b1EdN3cULCqtorQ4QeWgLMrd5ZGOjLSLemfa00heasc=",
|
||||||
|
"owner": "nix-community",
|
||||||
|
"repo": "nixpkgs.lib",
|
||||||
|
"rev": "e4822aea2a6d1cdd36653c134cacfd64c97ff4fa",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "nix-community",
|
||||||
|
"repo": "nixpkgs.lib",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nixpkgs_2": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1717432640,
|
||||||
|
"narHash": "sha256-+f9c4/ZX5MWDOuB1rKoWj+lBNm0z0rs4CK47HBLxy1o=",
|
||||||
|
"owner": "NixOS",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"rev": "88269ab3044128b7c2f4c7d68448b2fb50456870",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "NixOS",
|
||||||
|
"ref": "release-24.05",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nixpkgs_3": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1733477122,
|
||||||
|
"narHash": "sha256-qamMCz5mNpQmgBwc8SB5tVMlD5sbwVIToVZtSxMph9s=",
|
||||||
|
"owner": "cachix",
|
||||||
|
"repo": "devenv-nixpkgs",
|
||||||
|
"rev": "7bd9e84d0452f6d2e63b6e6da29fe73fac951857",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "cachix",
|
||||||
|
"ref": "rolling",
|
||||||
|
"repo": "devenv-nixpkgs",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nixpkgs_4": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1744536153,
|
||||||
|
"narHash": "sha256-awS2zRgF4uTwrOKwwiJcByDzDOdo3Q1rPZbiHQg/N38=",
|
||||||
|
"owner": "NixOS",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"rev": "18dd725c29603f582cf1900e0d25f9f1063dbf11",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "NixOS",
|
||||||
|
"ref": "nixpkgs-unstable",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"root": {
|
||||||
|
"inputs": {
|
||||||
|
"devenv": "devenv",
|
||||||
|
"flake-parts": "flake-parts_2",
|
||||||
|
"nixpkgs": "nixpkgs_4"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"root": "root",
|
||||||
|
"version": 7
|
||||||
|
}
|
||||||
46
vendor/github.com/go-viper/mapstructure/v2/flake.nix
generated
vendored
Normal file
46
vendor/github.com/go-viper/mapstructure/v2/flake.nix
generated
vendored
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
{
|
||||||
|
inputs = {
|
||||||
|
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
|
||||||
|
flake-parts.url = "github:hercules-ci/flake-parts";
|
||||||
|
devenv.url = "github:cachix/devenv";
|
||||||
|
};
|
||||||
|
|
||||||
|
outputs =
|
||||||
|
inputs@{ flake-parts, ... }:
|
||||||
|
flake-parts.lib.mkFlake { inherit inputs; } {
|
||||||
|
imports = [
|
||||||
|
inputs.devenv.flakeModule
|
||||||
|
];
|
||||||
|
|
||||||
|
systems = [
|
||||||
|
"x86_64-linux"
|
||||||
|
"x86_64-darwin"
|
||||||
|
"aarch64-darwin"
|
||||||
|
];
|
||||||
|
|
||||||
|
perSystem =
|
||||||
|
{ pkgs, ... }:
|
||||||
|
rec {
|
||||||
|
devenv.shells = {
|
||||||
|
default = {
|
||||||
|
languages = {
|
||||||
|
go.enable = true;
|
||||||
|
};
|
||||||
|
|
||||||
|
pre-commit.hooks = {
|
||||||
|
nixpkgs-fmt.enable = true;
|
||||||
|
};
|
||||||
|
|
||||||
|
packages = with pkgs; [
|
||||||
|
golangci-lint
|
||||||
|
];
|
||||||
|
|
||||||
|
# https://github.com/cachix/devenv/issues/528#issuecomment-1556108767
|
||||||
|
containers = pkgs.lib.mkForce { };
|
||||||
|
};
|
||||||
|
|
||||||
|
ci = devenv.shells.default;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
11
vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go
generated
vendored
Normal file
11
vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
package errors
|
||||||
|
|
||||||
|
import "errors"
|
||||||
|
|
||||||
|
func New(text string) error {
|
||||||
|
return errors.New(text)
|
||||||
|
}
|
||||||
|
|
||||||
|
func As(err error, target interface{}) bool {
|
||||||
|
return errors.As(err, target)
|
||||||
|
}
|
||||||
9
vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go
generated
vendored
Normal file
9
vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
//go:build go1.20
|
||||||
|
|
||||||
|
package errors
|
||||||
|
|
||||||
|
import "errors"
|
||||||
|
|
||||||
|
func Join(errs ...error) error {
|
||||||
|
return errors.Join(errs...)
|
||||||
|
}
|
||||||
61
vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go
generated
vendored
Normal file
61
vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go
generated
vendored
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
//go:build !go1.20
|
||||||
|
|
||||||
|
// Copyright 2022 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package errors
|
||||||
|
|
||||||
|
// Join returns an error that wraps the given errors.
|
||||||
|
// Any nil error values are discarded.
|
||||||
|
// Join returns nil if every value in errs is nil.
|
||||||
|
// The error formats as the concatenation of the strings obtained
|
||||||
|
// by calling the Error method of each element of errs, with a newline
|
||||||
|
// between each string.
|
||||||
|
//
|
||||||
|
// A non-nil error returned by Join implements the Unwrap() []error method.
|
||||||
|
func Join(errs ...error) error {
|
||||||
|
n := 0
|
||||||
|
for _, err := range errs {
|
||||||
|
if err != nil {
|
||||||
|
n++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if n == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
e := &joinError{
|
||||||
|
errs: make([]error, 0, n),
|
||||||
|
}
|
||||||
|
for _, err := range errs {
|
||||||
|
if err != nil {
|
||||||
|
e.errs = append(e.errs, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
type joinError struct {
|
||||||
|
errs []error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *joinError) Error() string {
|
||||||
|
// Since Join returns nil if every value in errs is nil,
|
||||||
|
// e.errs cannot be empty.
|
||||||
|
if len(e.errs) == 1 {
|
||||||
|
return e.errs[0].Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
b := []byte(e.errs[0].Error())
|
||||||
|
for _, err := range e.errs[1:] {
|
||||||
|
b = append(b, '\n')
|
||||||
|
b = append(b, err.Error()...)
|
||||||
|
}
|
||||||
|
// At this point, b has at least one byte '\n'.
|
||||||
|
// return unsafe.String(&b[0], len(b))
|
||||||
|
return string(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *joinError) Unwrap() []error {
|
||||||
|
return e.errs
|
||||||
|
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user