diff --git a/.golangci.yaml b/.golangci.yaml
index 204e06c5..e763ed69 100644
--- a/.golangci.yaml
+++ b/.golangci.yaml
@@ -152,7 +152,7 @@ linters:
- wsl
- gomnd
- godox
- - goerr113
+ - err113
- testpackage
- godot
- interfacer
diff --git a/bridge/mattermost/mattermost.go b/bridge/mattermost/mattermost.go
index a0496644..4967a63c 100644
--- a/bridge/mattermost/mattermost.go
+++ b/bridge/mattermost/mattermost.go
@@ -1,6 +1,7 @@
package mattermost
import (
+ "context"
"encoding/json"
"errors"
"fmt"
@@ -14,7 +15,7 @@ import (
lru "github.com/hashicorp/golang-lru"
prefixed "github.com/matterbridge/logrus-prefixed-formatter"
"github.com/matterbridge/matterclient"
- "github.com/mattermost/mattermost-server/v6/model"
+ "github.com/mattermost/mattermost/server/public/model"
"github.com/mitchellh/mapstructure"
"github.com/sirupsen/logrus"
"github.com/spf13/viper"
@@ -201,7 +202,7 @@ func (m *Mattermost) checkWsActionMessage(rmsg *model.WebSocketEvent, throttle *
}
func (m *Mattermost) Invite(channelID, username string) error {
- _, _, err := m.mc.Client.AddChannelMember(channelID, username)
+ _, _, err := m.mc.Client.AddChannelMember(context.TODO(), channelID, username)
if err != nil {
return err
}
@@ -214,7 +215,7 @@ func (m *Mattermost) Join(channelName string) (string, string, error) {
sp := strings.Split(channelName, "/")
if len(sp) > 1 {
- team, _, _ := m.mc.Client.GetTeamByName(sp[0], "")
+ team, _, _ := m.mc.Client.GetTeamByName(context.TODO(), sp[0], "")
if team == nil {
return "", "", fmt.Errorf("cannot join channel (+i)")
}
@@ -262,9 +263,9 @@ func (m *Mattermost) List() (map[string]string, error) {
}
func (m *Mattermost) Part(channelID string) error {
- m.mc.Client.RemoveUserFromChannel(channelID, m.mc.User.Id)
- return nil
+ _, err := m.mc.Client.RemoveUserFromChannel(context.TODO(), channelID, m.mc.User.Id)
+ return err
}
func (m *Mattermost) UpdateChannels() error {
@@ -302,7 +303,7 @@ func (m *Mattermost) MsgUser(userID, text string) (string, error) {
func (m *Mattermost) MsgUserThread(userID, parentID, text string) (string, error) {
// create DM channel (only happens on first message)
- dchannel, _, err := m.mc.Client.CreateDirectChannel(m.mc.User.Id, userID)
+ dchannel, _, err := m.mc.Client.CreateDirectChannel(context.TODO(), m.mc.User.Id, userID)
if err != nil {
return "", err
}
@@ -329,7 +330,7 @@ func (m *Mattermost) MsgChannelThread(channelID, parentID, text string) (string,
post.SetProps(props)
- rp, _, err := m.mc.Client.CreatePost(post)
+ rp, _, err := m.mc.Client.CreatePost(context.TODO(), post)
if err == nil {
return rp.Id, nil
}
@@ -339,7 +340,7 @@ func (m *Mattermost) MsgChannelThread(channelID, parentID, text string) (string,
}
// Try to work out if we're trying to reply to a post within a thread.
- replyPost, _, err := m.mc.Client.GetPost(parentID, "")
+ replyPost, _, err := m.mc.Client.GetPost(context.TODO(), parentID, "")
if err != nil {
return "", err
}
@@ -352,7 +353,7 @@ func (m *Mattermost) MsgChannelThread(channelID, parentID, text string) (string,
post.SetProps(props)
- rp, _, err = m.mc.Client.CreatePost(post)
+ rp, _, err = m.mc.Client.CreatePost(context.TODO(), post)
if err == nil {
return rp.Id, nil
}
@@ -362,7 +363,7 @@ func (m *Mattermost) MsgChannelThread(channelID, parentID, text string) (string,
func (m *Mattermost) ModifyPost(msgID, text string) error {
if text == "" {
- _, err := m.mc.Client.DeletePost(msgID)
+ _, err := m.mc.Client.DeletePost(context.TODO(), msgID)
if err != nil {
return err
}
@@ -370,7 +371,7 @@ func (m *Mattermost) ModifyPost(msgID, text string) error {
return nil
}
- _, _, err := m.mc.Client.PatchPost(msgID, &model.PostPatch{
+ _, _, err := m.mc.Client.PatchPost(context.TODO(), msgID, &model.PostPatch{
Message: &text,
})
if err != nil {
@@ -389,7 +390,7 @@ func (m *Mattermost) AddReaction(msgID, emoji string) error {
CreateAt: 0,
}
- _, _, err := m.mc.Client.SaveReaction(reaction)
+ _, _, err := m.mc.Client.SaveReaction(context.TODO(), reaction)
if err != nil {
return err
}
@@ -406,7 +407,7 @@ func (m *Mattermost) RemoveReaction(msgID, emoji string) error {
CreateAt: 0,
}
- _, err := m.mc.Client.DeleteReaction(reaction)
+ _, err := m.mc.Client.DeleteReaction(context.TODO(), reaction)
if err != nil {
return err
}
@@ -424,7 +425,7 @@ func (m *Mattermost) SetTopic(channelID, text string) error {
Header: &text,
}
- _, _, err := m.mc.Client.PatchChannel(channelID, patch)
+ _, _, err := m.mc.Client.PatchChannel(context.TODO(), channelID, patch)
if err != nil {
return err
}
@@ -445,7 +446,7 @@ func (m *Mattermost) Protocol() string {
}
func (m *Mattermost) Kick(channelID, username string) error {
- _, err := m.mc.Client.RemoveUserFromChannel(channelID, username)
+ _, err := m.mc.Client.RemoveUserFromChannel(context.TODO(), channelID, username)
if err != nil {
return err
}
@@ -454,7 +455,7 @@ func (m *Mattermost) Kick(channelID, username string) error {
}
func (m *Mattermost) SetStatus(status string) error {
- _, _, err := m.mc.Client.UpdateUserStatus(m.mc.User.Id, &model.Status{
+ _, _, err := m.mc.Client.UpdateUserStatus(context.TODO(), m.mc.User.Id, &model.Status{
Status: status,
UserId: m.mc.User.Id,
})
@@ -521,7 +522,7 @@ func (m *Mattermost) GetChannelUsers(channelID string) ([]*bridge.UserInfo, erro
max := 200
for {
- mmusersPaged, resp, err = m.mc.Client.GetUsersInChannel(channelID, idx, max, "")
+ mmusersPaged, resp, err = m.mc.Client.GetUsersInChannel(context.TODO(), channelID, idx, max, "")
if err == nil {
break
}
@@ -533,7 +534,7 @@ func (m *Mattermost) GetChannelUsers(channelID string) ([]*bridge.UserInfo, erro
for len(mmusersPaged) > 0 {
for {
- mmusersPaged, resp, err = m.mc.Client.GetUsersInChannel(channelID, idx, max, "")
+ mmusersPaged, resp, err = m.mc.Client.GetUsersInChannel(context.TODO(), channelID, idx, max, "")
if err == nil {
idx++
time.Sleep(time.Millisecond * 200)
@@ -611,7 +612,7 @@ func (m *Mattermost) GetChannel(channelID string) (*bridge.ChannelInfo, error) {
}
// Fallback if it's not found in the cache.
- mmchannel, _, err := m.mc.Client.GetChannel(channelID, "")
+ mmchannel, _, err := m.mc.Client.GetChannel(context.TODO(), channelID, "")
if err != nil {
return nil, errors.New("channel not found")
}
@@ -634,7 +635,7 @@ func (m *Mattermost) GetMe() *bridge.UserInfo {
func (m *Mattermost) GetUserByUsername(username string) *bridge.UserInfo {
for {
- mmuser, resp, err := m.mc.Client.GetUserByUsername(username, "")
+ mmuser, resp, err := m.mc.Client.GetUserByUsername(context.TODO(), username, "")
if err == nil {
return m.createUser(mmuser)
}
@@ -818,10 +819,10 @@ func (m *Mattermost) addParentMsg(parentID string, msg string, newLen int, uncou
// Search and use cached reply if it exists.
// None found, so we'll need to create one and save it for future uses.
if v, ok := m.msgParentCache.Get(parentID); !ok {
- parentPost, _, err := m.mc.Client.GetPost(parentID, "")
+ parentPost, _, err := m.mc.Client.GetPost(context.TODO(), parentID, "")
// Retry once on failure.
if err != nil {
- parentPost, _, err = m.mc.Client.GetPost(parentID, "")
+ parentPost, _, err = m.mc.Client.GetPost(context.TODO(), parentID, "")
}
if err != nil {
return msg, err
@@ -980,7 +981,7 @@ func (m *Mattermost) handleWsActionPost(rmsg *model.WebSocketEvent) {
Text: msg,
ChannelID: data.ChannelId,
MessageID: data.Id,
- Event: rmsg.EventType(),
+ Event: string(rmsg.EventType()),
ParentID: data.RootId,
}
@@ -1020,7 +1021,7 @@ func (m *Mattermost) handleWsActionPost(rmsg *model.WebSocketEvent) {
MessageType: messageType,
ChannelType: channelType,
MessageID: data.Id,
- Event: rmsg.EventType(),
+ Event: string(rmsg.EventType()),
ParentID: data.RootId,
},
}
@@ -1047,7 +1048,7 @@ func (m *Mattermost) handleWsActionPost(rmsg *model.WebSocketEvent) {
Sender: ghost,
ChannelType: channelType,
MessageID: data.Id,
- Event: rmsg.EventType(),
+ Event: string(rmsg.EventType()),
ParentID: data.RootId,
},
}
@@ -1328,7 +1329,7 @@ func (m *Mattermost) handleReactionEvent(rmsg *model.WebSocketEvent) {
}
parentID := reaction.PostId
- parentPost, _, err := m.mc.Client.GetPost(reaction.PostId, "")
+ parentPost, _, err := m.mc.Client.GetPost(context.TODO(), reaction.PostId, "")
if err == nil {
parentID = parentPost.RootId
}
@@ -1392,7 +1393,7 @@ func (m *Mattermost) UpdateLastViewed(channelID string) {
func (m *Mattermost) UpdateLastViewedUser(userID string) error {
for {
- dc, resp, err := m.mc.Client.CreateDirectChannel(m.mc.User.Id, userID)
+ dc, resp, err := m.mc.Client.CreateDirectChannel(context.TODO(), m.mc.User.Id, userID)
if err == nil {
return m.mc.UpdateLastViewed(dc.Id)
}
@@ -1412,7 +1413,7 @@ func (m *Mattermost) GetFileLinks(fileIDs []string) []string {
}
func (m *Mattermost) SearchUsers(query string) ([]*bridge.UserInfo, error) {
- users, _, err := m.mc.Client.SearchUsers(&model.UserSearch{Term: query})
+ users, _, err := m.mc.Client.SearchUsers(context.TODO(), &model.UserSearch{Term: query})
if err != nil {
return nil, err
}
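Note: the github.com/mattermost/mattermost/server/public client takes a context.Context as its first argument on every call, which is what drives the context.TODO() churn above. A minimal sketch of the pattern, with the one-shot retry mirroring addParentMsg (assumes the matterclient-wrapped *model.Client4; not code from this PR):

// Sketch only: context-aware post fetch with a single retry.
package example

import (
	"context"
	"fmt"
	"time"

	"github.com/mattermost/mattermost/server/public/model"
)

func getPostWithRetry(client *model.Client4, postID string) (*model.Post, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	post, _, err := client.GetPost(ctx, postID, "")
	if err != nil {
		// Retry once on failure, as the bridge does for parent posts.
		post, _, err = client.GetPost(ctx, postID, "")
	}
	if err != nil {
		return nil, fmt.Errorf("get post %s: %w", postID, err)
	}
	return post, nil
}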
diff --git a/bridge/slack/slack.go b/bridge/slack/slack.go
index 6b240d05..54eafeb9 100644
--- a/bridge/slack/slack.go
+++ b/bridge/slack/slack.go
@@ -690,7 +690,9 @@ func (s *Slack) getSlackUserFromMessage(rmsg *slack.MessageEvent) (*slack.User,
}
if rmsg.Username == "" {
- bot, err := s.rtm.GetBotInfo(rmsg.BotID)
+ bot, err := s.rtm.GetBotInfo(slack.GetBotInfoParameters{
+ Bot: rmsg.BotID,
+ })
if err != nil {
suser.Profile.DisplayName = "bot"
suser.Name = "bot"
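Note: slack-go v0.13.0 replaces the bare bot-ID argument to GetBotInfo with a parameters struct. A hedged sketch of the new call shape, with the "bot" fallback mirroring the bridge code above (illustrative helper, not code from this PR):

// Sketch only: resolving a bot's name with slack-go >= v0.13.0.
package example

import "github.com/slack-go/slack"

func botDisplayName(api *slack.Client, botID string) string {
	bot, err := api.GetBotInfo(slack.GetBotInfoParameters{Bot: botID})
	if err != nil {
		// Fall back to a generic name when the bot lookup fails.
		return "bot"
	}
	return bot.Name
}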
diff --git a/go.mod b/go.mod
index f058abdf..006a76c0 100644
--- a/go.mod
+++ b/go.mod
@@ -1,90 +1,90 @@
module github.com/42wim/matterircd
require (
- github.com/alecthomas/chroma/v2 v2.9.1
+ github.com/alecthomas/chroma/v2 v2.14.0
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f
github.com/google/gops v0.3.27
- github.com/grokify/html-strip-tags-go v0.0.1
+ github.com/grokify/html-strip-tags-go v0.1.0
github.com/hashicorp/golang-lru v0.6.0
github.com/matterbridge/logrus-prefixed-formatter v0.5.3-0.20200523233437-d971309a77ba
- github.com/matterbridge/matterclient v0.0.0-20230909230346-007c5c33c54c
- github.com/mattermost/mattermost-server/v6 v6.7.2
+ github.com/matterbridge/matterclient v0.0.0-20240523235056-57f299489168
+ github.com/mattermost/mattermost/server/public v0.1.3
github.com/mattn/go-mastodon v0.0.6
github.com/mitchellh/mapstructure v1.5.0
github.com/muesli/reflow v0.3.0
github.com/sirupsen/logrus v1.9.3
- github.com/slack-go/slack v0.12.3
+ github.com/slack-go/slack v0.13.0
github.com/sorcix/irc v1.1.4
github.com/spf13/pflag v1.0.5
- github.com/spf13/viper v1.17.0
+ github.com/spf13/viper v1.18.2
github.com/stretchr/testify v1.8.4
- go.etcd.io/bbolt v1.3.7
+ go.etcd.io/bbolt v1.3.10
)
require (
- github.com/blang/semver v3.5.1+incompatible // indirect
- github.com/dlclark/regexp2 v1.10.0 // indirect
- github.com/dustin/go-humanize v1.0.0 // indirect
- github.com/dyatlov/go-opengraph v0.0.0-20210112100619-dae8665a5b09 // indirect
+ github.com/blang/semver/v4 v4.0.0 // indirect
+ github.com/dlclark/regexp2 v1.11.0 // indirect
+ github.com/dyatlov/go-opengraph/opengraph v0.0.0-20220524092352-606d7b1e5f8a // indirect
+ github.com/fatih/color v1.16.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
- github.com/fsnotify/fsnotify v1.6.0 // indirect
- github.com/go-asn1-ber/asn1-ber v1.5.3 // indirect
- github.com/google/uuid v1.3.0 // indirect
- github.com/gorilla/websocket v1.5.0 // indirect
- github.com/graph-gophers/graphql-go v1.3.0 // indirect
+ github.com/fsnotify/fsnotify v1.7.0 // indirect
+ github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect
+ github.com/golang/protobuf v1.5.3 // indirect
+ github.com/google/uuid v1.6.0 // indirect
+ github.com/gorilla/websocket v1.5.1 // indirect
+ github.com/hashicorp/errwrap v1.1.0 // indirect
+ github.com/hashicorp/go-hclog v1.6.2 // indirect
+ github.com/hashicorp/go-multierror v1.1.1 // indirect
+ github.com/hashicorp/go-plugin v1.6.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
+ github.com/hashicorp/yamux v0.1.1 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
- github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.17.0 // indirect
- github.com/klauspost/cpuid/v2 v2.0.12 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mattermost/go-i18n v1.11.1-0.20211013152124-5c415071e404 // indirect
- github.com/mattermost/ldap v0.0.0-20201202150706-ee0e6284187d // indirect
- github.com/mattermost/logr/v2 v2.0.15 // indirect
+ github.com/mattermost/ldap v0.0.0-20231116144001-0f480c025956 // indirect
+ github.com/mattermost/logr/v2 v2.0.21 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
- github.com/mattn/go-isatty v0.0.17 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
- github.com/minio/md5-simd v1.1.2 // indirect
- github.com/minio/minio-go/v7 v7.0.24 // indirect
- github.com/minio/sha256-simd v1.0.0 // indirect
- github.com/mitchellh/go-homedir v1.1.0 // indirect
- github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
- github.com/modern-go/reflect2 v1.0.2 // indirect
- github.com/opentracing/opentracing-go v1.2.0 // indirect
+ github.com/mitchellh/go-testing-interface v1.14.1 // indirect
+ github.com/nxadm/tail v1.4.8 // indirect
+ github.com/oklog/run v1.1.0 // indirect
github.com/pborman/uuid v1.2.1 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
- github.com/philhofer/fwd v1.1.1 // indirect
+ github.com/philhofer/fwd v1.1.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
- github.com/rs/xid v1.4.0 // indirect
- github.com/sagikazarmark/locafero v0.3.0 // indirect
+ github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
- github.com/spf13/afero v1.10.0 // indirect
- github.com/spf13/cast v1.5.1 // indirect
+ github.com/spf13/afero v1.11.0 // indirect
+ github.com/spf13/cast v1.6.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
- github.com/tinylib/msgp v1.1.6 // indirect
+ github.com/tinylib/msgp v1.1.9 // indirect
github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 // indirect
- github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect
+ github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
- github.com/wiggin77/merror v1.0.3 // indirect
+ github.com/wiggin77/merror v1.0.5 // indirect
github.com/wiggin77/srslog v1.0.1 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
- golang.org/x/crypto v0.13.0 // indirect
+ golang.org/x/crypto v0.22.0 // indirect
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
- golang.org/x/net v0.15.0 // indirect
- golang.org/x/sys v0.12.0 // indirect
- golang.org/x/term v0.12.0 // indirect
- golang.org/x/text v0.13.0 // indirect
+ golang.org/x/net v0.24.0 // indirect
+ golang.org/x/sys v0.19.0 // indirect
+ golang.org/x/term v0.19.0 // indirect
+ golang.org/x/text v0.14.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect
+ google.golang.org/grpc v1.62.0 // indirect
+ google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
- gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
+ gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
-go 1.19
+go 1.22.0
diff --git a/go.sum b/go.sum
index 368d466e..32339430 100644
--- a/go.sum
+++ b/go.sum
@@ -1,1254 +1,215 @@
-bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
-cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
-cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
-cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
-cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
-cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
-cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
-cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
-cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
-cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
-cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
-cloud.google.com/go v0.88.0/go.mod h1:dnKwfYbP9hQhefiUvpbcAyoGSHUrOxR20JVElLiUvEY=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
-cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
-cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/spanner v1.24.0/go.mod h1:EZI0yH1D/PrXK0XH9Ba5LGXTXWeqZv0ClOD/19a0Z58=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
-code.sajari.com/docconv v1.2.0/go.mod h1:r8yfCP6OKbZ9Xkd87aBa4nfpk6ud/PoyLwex3n6cXSc=
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
-gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
-github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
-github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
-github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck=
-github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
-github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
-github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
-github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
-github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
-github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
-github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
-github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
-github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
-github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
-github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
-github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/ClickHouse/clickhouse-go v1.4.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
-github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
-github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo=
-github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
-github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4=
-github.com/JalfResi/justext v0.0.0-20170829062021-c0282dea7198/go.mod h1:0SURuH1rsE8aVWvutuMZghRNrNrYEUzibzJfhEYR8L0=
-github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
-github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
-github.com/Masterminds/squirrel v1.5.2/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=
-github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
-github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
-github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
-github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
-github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
-github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
-github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
-github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
-github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
-github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
-github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
-github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
-github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
-github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/PuerkitoBio/goquery v1.4.1/go.mod h1:T9ezsOHcCrDCgA8aF1Cqr3sSYbO/xgdy8/R/XiIMAhA=
-github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
-github.com/PuerkitoBio/goquery v1.8.0/go.mod h1:ypIiRMtY7COPGk+I/YbZLbxsxn9g5ejnI2HSMtkjZvI=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/RoaringBitmap/roaring v0.9.4/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA=
-github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
-github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
-github.com/advancedlogic/GoOse v0.0.0-20191112112754-e742535969c1/go.mod h1:f3HCSN1fBWjcpGtXyM119MJgeQl838v6so/PQOqvE1w=
-github.com/advancedlogic/GoOse v0.0.0-20210820140952-9d5822d4a625/go.mod h1:f3HCSN1fBWjcpGtXyM119MJgeQl838v6so/PQOqvE1w=
-github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
-github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
-github.com/alecthomas/assert/v2 v2.2.1 h1:XivOgYcduV98QCahG8T5XTezV5bylXe+lBxLG2K2ink=
-github.com/alecthomas/chroma/v2 v2.9.1 h1:0O3lTQh9FxazJ4BYE/MOi/vDGuHn7B+6Bu902N2UZvU=
-github.com/alecthomas/chroma/v2 v2.9.1/go.mod h1:4TQu7gdfuPjSh76j78ietmqh9LiurGF0EpseFXdKMBw=
-github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
-github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
-github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
-github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
-github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
-github.com/andybalholm/cascadia v1.2.0/go.mod h1:YCyR8vOZT9aZ1CHEd8ap0gMVm2aFgxBp0T0eFw1RUQY=
-github.com/andybalholm/cascadia v1.3.1/go.mod h1:R4bJ1UQfqADjvDa4P6HZHLh/3OxWWEqc0Sk8XGwHqvA=
+github.com/alecthomas/assert/v2 v2.7.0 h1:QtqSACNS3tF7oasA8CU6A6sXZSBDqnm7RfpLl9bZqbE=
+github.com/alecthomas/assert/v2 v2.7.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
+github.com/alecthomas/chroma/v2 v2.14.0 h1:R3+wzpnUArGcQz7fCETQBzO5n9IMNi13iIs46aU4V9E=
+github.com/alecthomas/chroma/v2 v2.14.0/go.mod h1:QolEbTfmUHIMVpBqxeDnNBj2uoeI4EbYP4i6n68SG4I=
+github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc=
+github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64/go.mod h1:2qMFB56yOP3KzkB3PbYZ4AlUFg3a88F67TIx5lB/WwY=
-github.com/apache/arrow/go/arrow v0.0.0-20211013220434-5962184e7a30/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs=
-github.com/araddon/dateparse v0.0.0-20180729174819-cfd92a431d0e/go.mod h1:SLqhdZcd+dF3TEVL2RMoob5bBP5R1P1qkox+HtCBgGI=
-github.com/araddon/dateparse v0.0.0-20200409225146-d820a6159ab1/go.mod h1:SLqhdZcd+dF3TEVL2RMoob5bBP5R1P1qkox+HtCBgGI=
-github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de/go.mod h1:DCaWoUhZrYW9p1lxo/cm8EmUOOzAPSEZNGF2DK1dJgw=
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/avct/uasurfer v0.0.0-20191028135549-26b5daa857f1/go.mod h1:noBAuukeYOXa0aXGqxr24tADqkwDO2KRD15FsuaZ5a8=
-github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
-github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.43.31/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
-github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0=
-github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
-github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU=
-github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw=
-github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM=
-github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0/go.mod h1:eHwXu2+uE/T6gpnYWwBwqoeqRf9IXyCcolyOWDRAErQ=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.4/go.mod h1:Ex7XQmbFmgFHrjUX6TN3mApKW5Hglyga+F7wZHTtYhA=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2/go.mod h1:EASdTcM1lGhUe1/p4gkojHwlGJkeoRjjr1sRCzup3Is=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2/go.mod h1:QuL2Ym8BkrLmN4lUofXYq6000/i5jPjosCNK//t6gak=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.2/go.mod h1:np7TMuJNT83O0oDOSF8i4dF3dvGqA6hPYYo6YYkzgRA=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.12.0/go.mod h1:6J++A5xpo7QDsIeSqPK4UHqMSyPOCopa+zKtqAMhqVQ=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.16.1/go.mod h1:CQe/KvWV1AqRc65KqeJjrLzr5X2ijnFTTVzJW0VBRCI=
-github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo=
-github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk=
-github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs=
-github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g=
-github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
-github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
-github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
-github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
-github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A=
-github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
-github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
-github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
-github.com/bits-and-blooms/bitset v1.2.2/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
-github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4=
-github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
-github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
-github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
-github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/blevesearch/bleve/v2 v2.3.2/go.mod h1:96+xE5pZUOsr3Y4vHzV1cBC837xZCpwLlX0hrrxnvIg=
-github.com/blevesearch/bleve_index_api v1.0.1/go.mod h1:fiwKS0xLEm+gBRgv5mumf0dhgFr2mDgZah1pqv1c1M4=
-github.com/blevesearch/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:9eJDeqxJ3E7WnLebQUlPD7ZjSce7AnDb9vjGmMCbD0A=
-github.com/blevesearch/go-porterstemmer v1.0.3/go.mod h1:angGc5Ht+k2xhJdZi511LtmxuEf0OVpvUUNrwmM1P7M=
-github.com/blevesearch/goleveldb v1.0.1/go.mod h1:WrU8ltZbIp0wAoig/MHbrPCXSOLpe79nz5lv5nqfYrQ=
-github.com/blevesearch/gtreap v0.1.1/go.mod h1:QaQyDRAT51sotthUWAH4Sj08awFSSWzgYICSZ3w0tYk=
-github.com/blevesearch/mmap-go v1.0.2/go.mod h1:ol2qBqYaOUsGdm7aRMRrYGgPvnwLe6Y+7LMvAB5IbSA=
-github.com/blevesearch/mmap-go v1.0.3/go.mod h1:pYvKl/grLQrBxuaRYgoTssa4rVujYYeenDp++2E+yvs=
-github.com/blevesearch/scorch_segment_api/v2 v2.1.0/go.mod h1:uch7xyyO/Alxkuxa+CGs79vw0QY8BENSBjg6Mw5L5DE=
-github.com/blevesearch/segment v0.9.0/go.mod h1:9PfHYUdQCgHktBgvtUOF4x+pc4/l8rdH0u5spnW85UQ=
-github.com/blevesearch/snowball v0.6.1/go.mod h1:ZF0IBg5vgpeoUhnMza2v0A/z8m1cWPlwhke08LpNusg=
-github.com/blevesearch/snowballstem v0.9.0/go.mod h1:PivSj3JMc8WuaFkTSRDW2SlrulNWPl4ABg1tC/hlgLs=
-github.com/blevesearch/upsidedown_store_api v1.0.1/go.mod h1:MQDVGpHZrpe3Uy26zJBf/a8h0FZY6xJbthIMm8myH2Q=
-github.com/blevesearch/vellum v1.0.7/go.mod h1:doBZpmRhwTsASB4QdUZANlJvqVAUdUyX0ZK7QJCTeBE=
-github.com/blevesearch/zapx/v11 v11.3.3/go.mod h1:YzTfUm4kS3e8OmTXDHVV8OzC5MWPO/VPJZQgPNVb4Lc=
-github.com/blevesearch/zapx/v12 v12.3.3/go.mod h1:RMl6lOZqF+sTxKvhQDJ5yK2LT3Mu7E2p/jGdjAaiRxs=
-github.com/blevesearch/zapx/v13 v13.3.3/go.mod h1:eppobNM35U4C22yDvTuxV9xPqo10pwfP/jugL4INWG4=
-github.com/blevesearch/zapx/v14 v14.3.3/go.mod h1:zXNcVzukh0AvG57oUtT1T0ndi09H0kELNaNmekEy0jw=
-github.com/blevesearch/zapx/v15 v15.3.3/go.mod h1:C+f/97ZzTzK6vt/7sVlZdzZxKu+5+j4SrGCvr9dJzaY=
-github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
-github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
-github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
-github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA=
+github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8=
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
-github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
-github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
-github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
-github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
-github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
-github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
-github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
-github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
-github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
-github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
-github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
-github.com/cockroachdb/cockroach-go/v2 v2.1.1/go.mod h1:7NtUnP6eK+l6k483WSYNrq3Kb23bWV10IRV1TyeSpwM=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
-github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
-github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
-github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
-github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
-github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
-github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
-github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
-github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
-github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
-github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
-github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
-github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
-github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
-github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
-github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
-github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
-github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
-github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
-github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
-github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
-github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
-github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
-github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
-github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
-github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
-github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
-github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
-github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
-github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
-github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
-github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
-github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
-github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
-github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
-github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
-github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
-github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
-github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
-github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
-github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
-github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
-github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
-github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
-github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
-github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
-github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
-github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
-github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
-github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
-github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
-github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
-github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
-github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
-github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
-github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
-github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
-github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
-github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
-github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
-github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
-github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
-github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
-github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/couchbase/ghistogram v0.1.0/go.mod h1:s1Jhy76zqfEecpNWJfWUiKZookAFaiGOEoyzgHt9i7k=
-github.com/couchbase/moss v0.2.0/go.mod h1:9MaHIaRuy9pvLPUJxB8sh8OrLfyDczECVL37grCIubs=
-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
-github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
-github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
-github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
-github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
-github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
-github.com/dave/jennifer v1.4.1/go.mod h1:7jEdnm+qBcxl8PC0zyp7vxcpSRnzXSt9r39tpTVGlwA=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
-github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I=
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE=
-github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
-github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/dgoogauth v0.0.0-20190221195224-5a805980a5f3/go.mod h1:hEfFauPHz7+NnjR/yHJGhrKo1Za+zStgwUETx3yzqgY=
-github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
-github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dhui/dktest v0.3.7/go.mod h1:nYMOkafiA07WchSwKnKFUSbGMb2hMm5DrCGiXYG6gwM=
-github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
-github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq0=
-github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
-github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
-github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
-github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v20.10.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
-github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
-github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
-github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
-github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
-github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
-github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
-github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s=
-github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
+github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
+github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dyatlov/go-opengraph v0.0.0-20210112100619-dae8665a5b09 h1:AQLr//nh20BzN3hIWj2+/Gt3FwSs8Nwo/nz4hMIcLPg=
-github.com/dyatlov/go-opengraph v0.0.0-20210112100619-dae8665a5b09/go.mod h1:nYia/MIs9OyvXXYboPmNOj0gVWo97Wx0sde+ZuKkoM4=
-github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
-github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
-github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
-github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
-github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
+github.com/dyatlov/go-opengraph/opengraph v0.0.0-20220524092352-606d7b1e5f8a h1:etIrTD8BQqzColk9nKRusM9um5+1q0iOEJLqfBMIK64=
+github.com/dyatlov/go-opengraph/opengraph v0.0.0-20220524092352-606d7b1e5f8a/go.mod h1:emQhSYTXqB0xxjLITTw4EaWZ+8IIQYw+kx9GqNUKdLg=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
-github.com/fatih/set v0.2.1/go.mod h1:+RKtMCH+favT2+3YecHGxcc0b4KyVWA1QWWJUs4E0CI=
-github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
-github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
+github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
-github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
-github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
-github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
-github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
-github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
-github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
-github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
-github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw=
-github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
-github.com/gabriel-vasile/mimetype v1.3.1/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8=
-github.com/gabriel-vasile/mimetype v1.4.0/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8=
-github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
-github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
-github.com/getsentry/sentry-go v0.13.0/go.mod h1:EOsfu5ZdvKPfeHYV6pTVQnsjfp30+XA7//UooKNumH0=
-github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gigawattio/window v0.0.0-20180317192513-0f5467e35573/go.mod h1:eBvb3i++NHDH4Ugo9qCvMw8t0mTSctaEa5blJbWcNxs=
-github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
-github.com/gin-gonic/gin v1.7.7/go.mod h1:axIBovoeJpVj8S3BwE0uPMTeReE4+AfFtqpqaZ1qq1U=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
-github.com/go-asn1-ber/asn1-ber v1.3.2-0.20191121212151-29be175fc3a3/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
-github.com/go-asn1-ber/asn1-ber v1.5.3 h1:u7utq56RUFiynqUzgVMFDymapcOtQ/MZkh3H4QYkxag=
-github.com/go-asn1-ber/asn1-ber v1.5.3/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
-github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
+github.com/go-asn1-ber/asn1-ber v1.5.5 h1:MNHlNMBDgEKD4TcKr36vQN68BA00aDfjIt3/bD50WnA=
+github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
-github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
-github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
-github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
-github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
-github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
-github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
-github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
-github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
-github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
-github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
-github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
-github.com/go-redis/redis/v8 v8.0.0/go.mod h1:isLoQT/NFSP7V67lyvM9GmdvLdyZ7pEhsXvvyQtnQTo=
-github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
-github.com/go-resty/resty/v2 v2.0.0/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8=
-github.com/go-resty/resty/v2 v2.3.0/go.mod h1:UpN9CgLZNsv4e9XG50UU8xdI0F43UQ4HmxLBDwaroHU=
-github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
-github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-test/deep v1.0.4 h1:u2CU3YKy9I2pmu9pX0eq50wCgjfGIt539SqR7FbHiho=
github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
-github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
-github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
-github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
-github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
-github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
-github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
-github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
-github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
-github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
-github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
-github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
-github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
-github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
-github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
-github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
-github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
-github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM=
-github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
-github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
-github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
-github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
-github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
-github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
-github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
-github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
-github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
-github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
-github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
-github.com/gocql/gocql v0.0.0-20210515062232-b7ef815b4556/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY=
-github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
-github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
-github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
-github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
-github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
-github.com/golang-migrate/migrate/v4 v4.15.1/go.mod h1:/CrBenUbcDqsW29jGTR/XFqCfVi/Y6mHXlooCcSOJMQ=
-github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
-github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
-github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
-github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
-github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
-github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
-github.com/google/go-github/v35 v35.2.0/go.mod h1:s0515YVTI+IMrDoy9Y4pHt9ShGpzHvHO8rZ7L7acgvs=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gops v0.3.27 h1:BDdWfedShsBbeatZ820oA4DbVOC8yJ4NI8xAlDFWfgI=
github.com/google/gops v0.3.27/go.mod h1:lYqabmfnq4Q6UumWNx96Hjup5BDAVc8zmfIy0SkNCSk=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210715191844-86eeefc3e471/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
-github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gopherjs/gopherjs v0.0.0-20220221023154-0b2280d3ff96/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
-github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c=
-github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
-github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
-github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
-github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/schema v1.2.0/go.mod h1:kgLaKoK1FELgZqMAVxx/5cbj0kT+57qxUrAlIO2eleU=
-github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/graph-gophers/dataloader/v6 v6.0.0/go.mod h1:J15OZSnOoZgMkijpbZcwCmglIDYqlUiTEE1xLPbyqZM=
-github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0=
-github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
+github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
+github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grokify/html-strip-tags-go v0.0.1 h1:0fThFwLbW7P/kOiTBs03FsJSV9RM2M/Q/MOnCQxKMo0=
-github.com/grokify/html-strip-tags-go v0.0.1/go.mod h1:2Su6romC5/1VXOQMaWL2yb618ARB8iVo6/DR99A6d78=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grokify/html-strip-tags-go v0.1.0 h1:03UrQLjAny8xci+R+qjCce/MYnpNXCtgzltlQbOBae4=
+github.com/grokify/html-strip-tags-go v0.1.0/go.mod h1:ZdzgfHEzAfz9X6Xe5eBLVblWIxXfYSQ40S/VKrAOGpc=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c/go.mod h1:ObS/W+h8RYb1Y7fYivughjxojTmIu5iAIjSrSLCLeqE=
-github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
-github.com/hako/durafmt v0.0.0-20210608085754-5c1018a4e16b/go.mod h1:VzxiSdG6j1pi7rwGm/xYI5RbtpBgM8sARDXlvEvxlu0=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
-github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4=
-github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I=
+github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ=
-github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A=
+github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI=
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
-github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
+github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE=
+github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI=
-github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0=
-github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk=
-github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g=
-github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw=
-github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
-github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
-github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
-github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
-github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
-github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
-github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
-github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk=
-github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
-github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
-github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
-github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds=
-github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
-github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
-github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
-github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
-github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
-github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
-github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
-github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
-github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
-github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
-github.com/jackc/pgproto3/v2 v2.0.7/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
-github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
-github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
-github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
-github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
-github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
-github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0=
-github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po=
-github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ=
-github.com/jackc/pgtype v1.6.2/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
-github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
-github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
-github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
-github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA=
-github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o=
-github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg=
-github.com/jackc/pgx/v4 v4.10.1/go.mod h1:QlrWebbs3kqEZPHCTGyxecvzG6tvIsYu+A5b1raylkA=
-github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jaytaylor/html2text v0.0.0-20180606194806-57d518f124b0/go.mod h1:CVKlgaMiht+LXvHG173ujK6JUhZXKb2u/BQtjPDIvyk=
-github.com/jaytaylor/html2text v0.0.0-20200412013138-3577fbdbcff7/go.mod h1:CVKlgaMiht+LXvHG173ujK6JUhZXKb2u/BQtjPDIvyk=
-github.com/jaytaylor/html2text v0.0.0-20211105163654-bc68cce691ba/go.mod h1:CVKlgaMiht+LXvHG173ujK6JUhZXKb2u/BQtjPDIvyk=
+github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
-github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
-github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
-github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
-github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
-github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
-github.com/jmoiron/sqlx v1.3.1/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
-github.com/jmoiron/sqlx v1.3.4/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
-github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
-github.com/jonboulle/clockwork v0.2.3/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
-github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c=
+github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
-github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
-github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
-github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
-github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg=
-github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
-github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
-github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
-github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8=
-github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE=
-github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE=
-github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro=
-github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8=
-github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM=
-github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
-github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
-github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
-github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
-github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
-github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.0.12 h1:p9dKCg8i4gmOxtv35DvrYoWqYzQrvEVdjQ762Y0OqZE=
-github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
-github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
-github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/ktrysmt/go-bitbucket v0.6.4/go.mod h1:9u0v3hsd2rqCHRIpbir1oP7F58uo5dq19sBYvuMoyQ4=
-github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y=
-github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
-github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=
-github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw=
-github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
-github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
-github.com/levigross/exp-html v0.0.0-20120902181939-8df60c69a8f5/go.mod h1:QMe2wuKJ0o7zIVE8AqiT8rd8epmm6WDIZ2wyuBqYPzM=
-github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
-github.com/markbates/pkger v0.15.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI=
-github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
-github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
github.com/matterbridge/logrus-prefixed-formatter v0.5.3-0.20200523233437-d971309a77ba h1:XleOY4IjAEIcxAh+IFwT5JT5Ze3RHiYz6m+4ZfZ0rc0=
github.com/matterbridge/logrus-prefixed-formatter v0.5.3-0.20200523233437-d971309a77ba/go.mod h1:iXGEotOvwI1R1SjLxRc+BF5rUORTMtE0iMZBT2lxqAU=
-github.com/matterbridge/matterclient v0.0.0-20230909230346-007c5c33c54c h1:GFIa+6bvShSxC6XDxUFkZacOFi2x9KfXpvWqlyM0ppo=
-github.com/matterbridge/matterclient v0.0.0-20230909230346-007c5c33c54c/go.mod h1:Zg8PH1P/1CNUxozQ8blnjAV9PA4Qn2qWf33cX5yNKGM=
+github.com/matterbridge/matterclient v0.0.0-20240523235056-57f299489168 h1:tbrHr4V6PYCM9uOd4kGvzpeSFcVziQBZUlRfcgAI0Bw=
+github.com/matterbridge/matterclient v0.0.0-20240523235056-57f299489168/go.mod h1:quMZkUhnTp/YfbJU6sCgD6qVSPgH1Uxig9/AjgJQHF8=
github.com/mattermost/go-i18n v1.11.1-0.20211013152124-5c415071e404 h1:Khvh6waxG1cHc4Cz5ef9n3XVCxRWpAKUtqg9PJl5+y8=
github.com/mattermost/go-i18n v1.11.1-0.20211013152124-5c415071e404/go.mod h1:RyS7FDNQlzF1PsjbJWHRI35exqaKGSO9qD4iv8QjE34=
-github.com/mattermost/gosaml2 v0.3.3/go.mod h1:Z429EIOiEi9kbq6yHoApfzlcXpa6dzRDc6pO+Vy2Ksk=
-github.com/mattermost/gziphandler v0.0.1/go.mod h1:CvvZR7sXqhj81V2swXuQY7T04Ccc89u7W7pHNPKev8g=
-github.com/mattermost/ldap v0.0.0-20201202150706-ee0e6284187d h1:/RJ/UV7M5c7L2TQ0KNm4yZxxFvC1nvRz/gY/Daa35aI=
-github.com/mattermost/ldap v0.0.0-20201202150706-ee0e6284187d/go.mod h1:HLbgMEI5K131jpxGazJ97AxfPDt31osq36YS1oxFQPQ=
-github.com/mattermost/logr/v2 v2.0.15 h1:+WNbGcsc3dBao65eXlceB6dTILNJRIrvubnsTl3zBew=
-github.com/mattermost/logr/v2 v2.0.15/go.mod h1:mpPp935r5dIkFDo2y9Q87cQWhFR/4xXpNh0k/y8Hmwg=
-github.com/mattermost/mattermost-server/v6 v6.7.2 h1:rRss2/R5LNbyc/P1OA4kSWuVq+rmnxwepuwGpTwL+U4=
-github.com/mattermost/mattermost-server/v6 v6.7.2/go.mod h1:b/iDf7Jn2Pd2jWGzaznoVNT811JZpemdmNGP7M/a7Ao=
-github.com/mattermost/morph v0.0.0-20220401091636-39f834798da8/go.mod h1:jxM3g1bx+k2Thz7jofcHguBS8TZn5Pc+o5MGmORObhw=
-github.com/mattermost/rsc v0.0.0-20160330161541-bbaefb05eaa0/go.mod h1:nV5bfVpT//+B1RPD2JvRnxbkLmJEYXmRaaVl15fsXjs=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
-github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattermost/ldap v0.0.0-20231116144001-0f480c025956 h1:Y1Tu/swM31pVwwb2BTCsOdamENjjWCI6qmfHLbk6OZI=
+github.com/mattermost/ldap v0.0.0-20231116144001-0f480c025956/go.mod h1:SRl30Lb7/QoYyohYeVBuqYvvmXSZJxZgiV3Zf6VbxjI=
+github.com/mattermost/logr/v2 v2.0.21 h1:CMHsP+nrbRlEC4g7BwOk1GAnMtHkniFhlSQPXy52be4=
+github.com/mattermost/logr/v2 v2.0.21/go.mod h1:kZkB/zqKL9e+RY5gB3vGpsyenC+TpuiOenjMkvJJbzc=
+github.com/mattermost/mattermost/server/public v0.1.3 h1:A3hQ3rNCwHfKAVxe7Hk3Zd9p2pyUKRmxtRPnkWP5SFM=
+github.com/mattermost/mattermost/server/public v0.1.3/go.mod h1:PDPb/iqzJJ5ZvK/m70oDF55AXN/cOvVFj96Yu4e6j+Q=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
-github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
-github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
-github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-mastodon v0.0.6 h1:lqU1sOeeIapaDsDUL6udDZIzMb2Wqapo347VZlaOzf0=
github.com/mattn/go-mastodon v0.0.6/go.mod h1:cg7RFk2pcUfHZw/IvKe1FUzmlq5KnLFqs7eV2PHplV8=
-github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
-github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
-github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
-github.com/mattn/go-sqlite3 v1.14.9/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
-github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
-github.com/mholt/archiver/v3 v3.5.1/go.mod h1:e3dqJ7H78uzsRSEACH1joayhuSyhnonssnDhppzS1L4=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
-github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
-github.com/microcosm-cc/bluemonday v1.0.18/go.mod h1:Z0r70sCuXHig8YpBzCc5eGHAap2K7e/u082ZUpDRRqM=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
-github.com/miekg/dns v1.1.48/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
-github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
-github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw=
-github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
-github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.24 h1:HPlHiET6L5gIgrHRaw1xFo1OaN4bEP/082asWh3WJtI=
-github.com/minio/minio-go/v7 v7.0.24/go.mod h1:x81+AX5gHSfCSqw7jxRKHvxUXMlE5uKX0Vb75Xk5yYg=
-github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
-github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
-github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
-github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
-github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
-github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
-github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
-github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
-github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
-github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
-github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
-github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
-github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
-github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s=
github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8=
-github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/mutecomm/go-sqlcipher/v4 v4.4.0/go.mod h1:PyN04SaWalavxRGH9E8ZftG6Ju7rsPrGmQRjrEaVpiY=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA=
-github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
-github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
-github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
-github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
-github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
-github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
-github.com/neo4j/neo4j-go-driver v1.8.1-0.20200803113522-b626aa943eba/go.mod h1:ncO5VaFWh0Nrt+4KT4mOZboaczBZcLuHrG+/sUeP8gI=
-github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
-github.com/nwaples/rardecode v1.1.3/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
-github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/olekukonko/tablewriter v0.0.0-20180506121414-d4647c9c7a84/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
-github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
-github.com/olivere/elastic v6.2.37+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8=
-github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
-github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
-github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
-github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
-github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
-github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
-github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
-github.com/oov/psd v0.0.0-20220121172623-5db5eafcecbb/go.mod h1:GHI1bnmAcbp96z6LNfBJvtrjxhaXGkbsk967utPlvL8=
-github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
-github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
-github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
-github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
-github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
-github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
-github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
-github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
+github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
+github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
-github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
-github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
-github.com/otiai10/gosseract/v2 v2.2.4/go.mod h1:ahOp/kHojnOMGv1RaUnR0jwY5JVa6BYKhYAS8nbMLSo=
-github.com/otiai10/gosseract/v2 v2.3.1/go.mod h1:2ZOGgdTIXQzCS5f+N1HkcXRgDX6K3ZoYe3Yvo++cpp4=
-github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
-github.com/otiai10/mint v1.3.2/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw=
github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
-github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
-github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
-github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ=
-github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
-github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
-github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
-github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pierrec/lz4/v4 v4.1.2/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
-github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
-github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
-github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
-github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
-github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
-github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
+github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
-github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
-github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
-github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.33.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE=
-github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/reflog/dateconstraints v0.2.1/go.mod h1:Ax8AxTBcJc3E/oVS2hd2j7RDM/5MDtuPwuR7lIHtPLo=
-github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
-github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
-github.com/richardlehane/mscfb v1.0.3/go.mod h1:YzVpcZg9czvAuhk9T+a3avCpcFPMUWm7gK3DypaEsUk=
-github.com/richardlehane/mscfb v1.0.4/go.mod h1:YzVpcZg9czvAuhk9T+a3avCpcFPMUWm7gK3DypaEsUk=
-github.com/richardlehane/msoleps v1.0.1/go.mod h1:BWev5JBpU9Ko2WAgmZEuiz4/u3ZYTKbjLycmwiWUfWg=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
-github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
-github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
-github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY=
-github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
-github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
-github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
-github.com/rudderlabs/analytics-go v3.3.2+incompatible/go.mod h1:LF8/ty9kUX4PTY3l5c97K3nZZaX5Hwsvt+NBaRL/f30=
-github.com/russellhaering/goxmldsig v1.2.0/go.mod h1:gM4MDENBQf7M+V824SGfyIUVFWydB7n0KkEubVJl+Tw=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
-github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
-github.com/sagikazarmark/locafero v0.3.0 h1:zT7VEGWC2DTflmccN/5T1etyKvxSxpHsjb9cJvm4SvQ=
-github.com/sagikazarmark/locafero v0.3.0/go.mod h1:w+v7UsPNFwzF1cHuOajOOzoq4U7v/ig1mpRjqV+Bu1U=
+github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
+github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
-github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
-github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g=
-github.com/scylladb/termtables v0.0.0-20191203121021-c4c0b6d42ff4/go.mod h1:C1a7PQSMz9NShzorzCiG2fk9+xuCgLkPeCvMHYR2OWg=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
-github.com/segmentio/backo-go v0.0.0-20200129164019-23eae7c10bd3/go.mod h1:9/Rh6yILuLysoQnZ2oNooD2g7aBnvM7r/fNVxRNWfBc=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
-github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
-github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
-github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
@@ -1258,7 +219,6 @@ github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9A
github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
-github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
@@ -1266,1037 +226,177 @@ github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b
github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
-github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
-github.com/simplereach/timeutils v1.2.0/go.mod h1:VVbQDfN/FHRZa1LSqcwo4kNZ62OOyqLLGQKYB3pB0Q8=
-github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
-github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/slack-go/slack v0.12.3 h1:92/dfFU8Q5XP6Wp5rr5/T5JHLM5c5Smtn53fhToAP88=
-github.com/slack-go/slack v0.12.3/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/snowflakedb/gosnowflake v1.6.3/go.mod h1:6hLajn6yxuJ4xUHZegMekpq9rnQbGJ7TMwXjgTmA6lg=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/slack-go/slack v0.13.0 h1:7my/pR2ubZJ9912p9FtvALYpbt0cQPAqkRy2jaSI1PQ=
+github.com/slack-go/slack v0.13.0/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw=
github.com/sorcix/irc v1.1.4 h1:KDmVMPPzK4kbf3TQw1RsZAqTsh2JL9Zw69hYduX9Ykw=
github.com/sorcix/irc v1.1.4/go.mod h1:MhzbySH63tDknqfvAAFK3ps/942g4z9EeJ/4lGgHyZc=
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
-github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY=
-github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
-github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48=
-github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
-github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
-github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
-github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
-github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
+github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
+github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
+github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
-github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
-github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
-github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
-github.com/spf13/viper v1.17.0 h1:I5txKw7MJasPL/BrfkbA0Jyo/oELqVmux4pR/UxOMfI=
-github.com/spf13/viper v1.17.0/go.mod h1:BmMMMLQXSbcHK6KAOiFLz0l5JHrU89OdIRHvsk0+yVI=
-github.com/splitio/go-client/v6 v6.1.0/go.mod h1:CEGAEFT99Fwb32ZIRcnZoXTMXddtB6IIpTmt3RP8mnM=
-github.com/splitio/go-split-commons/v3 v3.1.0/go.mod h1:29NCy20oAS4ZMy4qkwTd6277eieVDonx4V/aeDU/wUQ=
-github.com/splitio/go-toolkit/v4 v4.2.0/go.mod h1:EdIHN0yzB1GTXDYQc0KdKvnjkO/jfUM2YqHVYfhD3Wo=
-github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf/go.mod h1:RJID2RhlZKId02nZ62WenDCkgHFerpIOmW0iT7GKmXM=
-github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
-github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
+github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
-github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
-github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
-github.com/throttled/throttled v2.2.5+incompatible/go.mod h1:0BjlrEGQmvxps+HuXLsyRdqpSRvJpq0PNIsOtqP9Nos=
-github.com/tidwall/gjson v1.14.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
-github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
-github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
-github.com/tinylib/msgp v1.1.6 h1:i+SbKraHhnrf9M5MYmvQhFnbLhAXSDWF8WWsuyRdocw=
-github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tinylib/msgp v1.1.9 h1:SHf3yoO2sGA0veCJeCBYLHuttAVFHGm2RHgNodW7wQU=
+github.com/tinylib/msgp v1.1.9/go.mod h1:BCXGB54lDD8qUEPmiG0cQQUANC4IUQyB2ItS2UDlO/k=
github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 h1:nrZ3ySNYwJbSpD6ce9duiP+QkD3JuLCcWkdaehUS/3Y=
github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80/go.mod h1:iFyPdL66DjUD96XmzVL3ZntbzcflLnznH0fr99w5VqE=
-github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
-github.com/tylerb/graceful v1.2.15/go.mod h1:LPYTbOYmUTdabwRt0TGhLllQ0MUNbs0Y5q1WXJOI9II=
-github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
-github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
-github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
-github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
-github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
-github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
-github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
-github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
-github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
-github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w=
-github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
-github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
-github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
-github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
-github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
-github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
-github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
-github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
-github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
-github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
-github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
+github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8=
+github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
-github.com/wiggin77/merror v1.0.2/go.mod h1:uQTcIU0Z6jRK4OwqganPYerzQxSFJ4GSHM3aurxxQpg=
-github.com/wiggin77/merror v1.0.3 h1:8+ZHV+aSnJoYghE3EUThl15C6rvF2TYRSvOSBjdmNR8=
-github.com/wiggin77/merror v1.0.3/go.mod h1:H2ETSu7/bPE0Ymf4bEwdUoo73OOEkdClnoRisfw0Nm0=
+github.com/wiggin77/merror v1.0.5 h1:P+lzicsn4vPMycAf2mFf7Zk6G9eco5N+jB1qJ2XW3ME=
+github.com/wiggin77/merror v1.0.5/go.mod h1:H2ETSu7/bPE0Ymf4bEwdUoo73OOEkdClnoRisfw0Nm0=
github.com/wiggin77/srslog v1.0.1 h1:gA2XjSMy3DrRdX9UqLuDtuVAAshb8bE1NhX1YK0Qe+8=
github.com/wiggin77/srslog v1.0.1/go.mod h1:fehkyYDq1QfuYn60TDPu9YdY2bB85VUW2mvN1WynEls=
-github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
-github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg=
-github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs=
-github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
-github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
-github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
-github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
-github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c h1:3lbZUMbMiGUW/LMkfsEABsc5zNT9+b1CvsJx47JzJ8g=
-github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM=
-github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI=
-github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
-github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
-github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.11/go.mod h1:rmuwmfZ0+bvzB24eSC//bk1R1Zp3hM0OXYv/G2LIilg=
-github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
-github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
-github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
-github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
-gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
-go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
-go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
-go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
-go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
-go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
-go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
-go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8=
-go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
+github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE=
+go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
+go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
-go.opentelemetry.io/otel v0.11.0/go.mod h1:G8UCk+KooF2HLkgo8RHX9epABH/aRGYET7gQOqBVdB0=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
-go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
-go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
-golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
-golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck=
-golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
-golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
+golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20200908183739-ae8ad444f925/go.mod h1:1phAWC201xIgDyaFpmDeZkgf70Q4Pd/CNqfRtVPtxNw=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
-golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/image v0.0.0-20220321031419-a8550c1d254a/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
-golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190225153610-fe579d43d832/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
-golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210505024714-0287a6fb4125/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210916014120-12bc252f5db8/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211013171255-e13a2654a71e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220403103023-749bd193bc2b/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8=
-golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
-golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
+golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210818153620-00dd8d7831e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210902050250-f475640dd07b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220403205710-6acee93ad0eb/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
-golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
+golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU=
-golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
+golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
-golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
-golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
-gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
-gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0=
-gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
-gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
-gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY=
-google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
-google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
-google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
-google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
-google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
-google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
-google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
-google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
-google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
-google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
-google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210207032614-bba0dbe2a9ea/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
-google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
-google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
-google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U=
-google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
-google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
-google.golang.org/genproto v0.0.0-20210721163202-f1cecdd8b78a/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
-google.golang.org/genproto v0.0.0-20210726143408-b02e89920bf0/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
-google.golang.org/genproto v0.0.0-20211013025323-ce878158c4d4/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220401170504-314d38edb7de/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
-google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
-google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
-google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
-google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
-google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk=
+google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
-gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
-gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw=
-gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
-gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
-gopkg.in/olivere/elastic.v6 v6.2.37/go.mod h1:2cTT8Z+/LcArSWpCgvZqBgt3VOqXiy7v00w12Lz8bd4=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gorm.io/driver/postgres v1.0.8/go.mod h1:4eOzrI1MUfm6ObJU/UcmbXyiHSs8jSwH95G5P5dxcAg=
-gorm.io/gorm v1.20.12/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw=
-gorm.io/gorm v1.21.4/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw=
-gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
-gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
-gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
-k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
-k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
-k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
-k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
-k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
-k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
-k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
-k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
-k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
-k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
-k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
-k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
-k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
-k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
-k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
-k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
-k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
-k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
-k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
-k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
-modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg=
-modernc.org/cc/v3 v3.32.4/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878=
-modernc.org/cc/v3 v3.33.6/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g=
-modernc.org/cc/v3 v3.33.9/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g=
-modernc.org/cc/v3 v3.33.11/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g=
-modernc.org/cc/v3 v3.34.0/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g=
-modernc.org/cc/v3 v3.35.0/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g=
-modernc.org/cc/v3 v3.35.4/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g=
-modernc.org/cc/v3 v3.35.5/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g=
-modernc.org/cc/v3 v3.35.7/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g=
-modernc.org/cc/v3 v3.35.8/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g=
-modernc.org/cc/v3 v3.35.10/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g=
-modernc.org/cc/v3 v3.35.15/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g=
-modernc.org/cc/v3 v3.35.16/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g=
-modernc.org/cc/v3 v3.35.17/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g=
-modernc.org/cc/v3 v3.35.18/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g=
-modernc.org/ccgo/v3 v3.9.2/go.mod h1:gnJpy6NIVqkETT+L5zPsQFj7L2kkhfPMzOghRNv/CFo=
-modernc.org/ccgo/v3 v3.9.5/go.mod h1:umuo2EP2oDSBnD3ckjaVUXMrmeAw8C8OSICVa0iFf60=
-modernc.org/ccgo/v3 v3.10.0/go.mod h1:c0yBmkRFi7uW4J7fwx/JiijwOjeAeR2NoSaRVFPmjMw=
-modernc.org/ccgo/v3 v3.11.0/go.mod h1:dGNposbDp9TOZ/1KBxghxtUp/bzErD0/0QW4hhSaBMI=
-modernc.org/ccgo/v3 v3.11.1/go.mod h1:lWHxfsn13L3f7hgGsGlU28D9eUOf6y3ZYHKoPaKU0ag=
-modernc.org/ccgo/v3 v3.11.3/go.mod h1:0oHunRBMBiXOKdaglfMlRPBALQqsfrCKXgw9okQ3GEw=
-modernc.org/ccgo/v3 v3.12.4/go.mod h1:Bk+m6m2tsooJchP/Yk5ji56cClmN6R1cqc9o/YtbgBQ=
-modernc.org/ccgo/v3 v3.12.6/go.mod h1:0Ji3ruvpFPpz+yu+1m0wk68pdr/LENABhTrDkMDWH6c=
-modernc.org/ccgo/v3 v3.12.8/go.mod h1:Hq9keM4ZfjCDuDXxaHptpv9N24JhgBZmUG5q60iLgUo=
-modernc.org/ccgo/v3 v3.12.11/go.mod h1:0jVcmyDwDKDGWbcrzQ+xwJjbhZruHtouiBEvDfoIsdg=
-modernc.org/ccgo/v3 v3.12.14/go.mod h1:GhTu1k0YCpJSuWwtRAEHAol5W7g1/RRfS4/9hc9vF5I=
-modernc.org/ccgo/v3 v3.12.18/go.mod h1:jvg/xVdWWmZACSgOiAhpWpwHWylbJaSzayCqNOJKIhs=
-modernc.org/ccgo/v3 v3.12.20/go.mod h1:aKEdssiu7gVgSy/jjMastnv/q6wWGRbszbheXgWRHc8=
-modernc.org/ccgo/v3 v3.12.21/go.mod h1:ydgg2tEprnyMn159ZO/N4pLBqpL7NOkJ88GT5zNU2dE=
-modernc.org/ccgo/v3 v3.12.22/go.mod h1:nyDVFMmMWhMsgQw+5JH6B6o4MnZ+UQNw1pp52XYFPRk=
-modernc.org/ccgo/v3 v3.12.25/go.mod h1:UaLyWI26TwyIT4+ZFNjkyTbsPsY3plAEB6E7L/vZV3w=
-modernc.org/ccgo/v3 v3.12.29/go.mod h1:FXVjG7YLf9FetsS2OOYcwNhcdOLGt8S9bQ48+OP75cE=
-modernc.org/ccgo/v3 v3.12.36/go.mod h1:uP3/Fiezp/Ga8onfvMLpREq+KUjUmYMxXPO8tETHtA8=
-modernc.org/ccgo/v3 v3.12.38/go.mod h1:93O0G7baRST1vNj4wnZ49b1kLxt0xCW5Hsa2qRaZPqc=
-modernc.org/ccgo/v3 v3.12.43/go.mod h1:k+DqGXd3o7W+inNujK15S5ZYuPoWYLpF5PYougCmthU=
-modernc.org/ccgo/v3 v3.12.46/go.mod h1:UZe6EvMSqOxaJ4sznY7b23/k13R8XNlyWsO5bAmSgOE=
-modernc.org/ccgo/v3 v3.12.47/go.mod h1:m8d6p0zNps187fhBwzY/ii6gxfjob1VxWb919Nk1HUk=
-modernc.org/ccgo/v3 v3.12.50/go.mod h1:bu9YIwtg+HXQxBhsRDE+cJjQRuINuT9PUK4orOco/JI=
-modernc.org/ccgo/v3 v3.12.51/go.mod h1:gaIIlx4YpmGO2bLye04/yeblmvWEmE4BBBls4aJXFiE=
-modernc.org/ccgo/v3 v3.12.53/go.mod h1:8xWGGTFkdFEWBEsUmi+DBjwu/WLy3SSOrqEmKUjMeEg=
-modernc.org/ccgo/v3 v3.12.54/go.mod h1:yANKFTm9llTFVX1FqNKHE0aMcQb1fuPJx6p8AcUx+74=
-modernc.org/ccgo/v3 v3.12.55/go.mod h1:rsXiIyJi9psOwiBkplOaHye5L4MOOaCjHg1Fxkj7IeU=
-modernc.org/ccgo/v3 v3.12.56/go.mod h1:ljeFks3faDseCkr60JMpeDb2GSO3TKAmrzm7q9YOcMU=
-modernc.org/ccgo/v3 v3.12.57/go.mod h1:hNSF4DNVgBl8wYHpMvPqQWDQx8luqxDnNGCMM4NFNMc=
-modernc.org/ccgo/v3 v3.12.60/go.mod h1:k/Nn0zdO1xHVWjPYVshDeWKqbRWIfif5dtsIOCUVMqM=
-modernc.org/ccgo/v3 v3.12.66/go.mod h1:jUuxlCFZTUZLMV08s7B1ekHX5+LIAurKTTaugUr/EhQ=
-modernc.org/ccgo/v3 v3.12.67/go.mod h1:Bll3KwKvGROizP2Xj17GEGOTrlvB1XcVaBrC90ORO84=
-modernc.org/ccgo/v3 v3.12.73/go.mod h1:hngkB+nUUqzOf3iqsM48Gf1FZhY599qzVg1iX+BT3cQ=
-modernc.org/ccgo/v3 v3.12.81/go.mod h1:p2A1duHoBBg1mFtYvnhAnQyI6vL0uw5PGYLSIgF6rYY=
-modernc.org/ccgo/v3 v3.12.84/go.mod h1:ApbflUfa5BKadjHynCficldU1ghjen84tuM5jRynB7w=
-modernc.org/ccgo/v3 v3.12.86/go.mod h1:dN7S26DLTgVSni1PVA3KxxHTcykyDurf3OgUzNqTSrU=
-modernc.org/ccgo/v3 v3.12.88/go.mod h1:0MFzUHIuSIthpVZyMWiFYMwjiFnhrN5MkvBrUwON+ZM=
-modernc.org/ccgo/v3 v3.12.90/go.mod h1:obhSc3CdivCRpYZmrvO88TXlW0NvoSVvdh/ccRjJYko=
-modernc.org/ccgo/v3 v3.12.92/go.mod h1:5yDdN7ti9KWPi5bRVWPl8UNhpEAtCjuEE7ayQnzzqHA=
-modernc.org/ccgo/v3 v3.12.95/go.mod h1:ZcLyvtocXYi8uF+9Ebm3G8EF8HNY5hGomBqthDp4eC8=
-modernc.org/ccorpus v1.11.1/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ=
-modernc.org/db v1.0.0/go.mod h1:kYD/cO29L/29RM0hXYl4i3+Q5VojL31kTUVpVJDw0s8=
-modernc.org/file v1.0.0/go.mod h1:uqEokAEn1u6e+J45e54dsEA/pw4o7zLrA2GwyntZzjw=
-modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8=
-modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
-modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
-modernc.org/internal v1.0.0/go.mod h1:VUD/+JAkhCpvkUitlEOnhpVxCgsBI90oTzSCRcqQVSM=
-modernc.org/libc v1.7.13-0.20210308123627-12f642a52bb8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w=
-modernc.org/libc v1.9.5/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w=
-modernc.org/libc v1.9.8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w=
-modernc.org/libc v1.9.11/go.mod h1:NyF3tsA5ArIjJ83XB0JlqhjTabTCHm9aX4XMPHyQn0Q=
-modernc.org/libc v1.11.0/go.mod h1:2lOfPmj7cz+g1MrPNmX65QCzVxgNq2C5o0jdLY2gAYg=
-modernc.org/libc v1.11.2/go.mod h1:ioIyrl3ETkugDO3SGZ+6EOKvlP3zSOycUETe4XM4n8M=
-modernc.org/libc v1.11.5/go.mod h1:k3HDCP95A6U111Q5TmG3nAyUcp3kR5YFZTeDS9v8vSU=
-modernc.org/libc v1.11.6/go.mod h1:ddqmzR6p5i4jIGK1d/EiSw97LBcE3dK24QEwCFvgNgE=
-modernc.org/libc v1.11.11/go.mod h1:lXEp9QOOk4qAYOtL3BmMve99S5Owz7Qyowzvg6LiZso=
-modernc.org/libc v1.11.13/go.mod h1:ZYawJWlXIzXy2Pzghaf7YfM8OKacP3eZQI81PDLFdY8=
-modernc.org/libc v1.11.16/go.mod h1:+DJquzYi+DMRUtWI1YNxrlQO6TcA5+dRRiq8HWBWRC8=
-modernc.org/libc v1.11.19/go.mod h1:e0dgEame6mkydy19KKaVPBeEnyJB4LGNb0bBH1EtQ3I=
-modernc.org/libc v1.11.24/go.mod h1:FOSzE0UwookyT1TtCJrRkvsOrX2k38HoInhw+cSCUGk=
-modernc.org/libc v1.11.26/go.mod h1:SFjnYi9OSd2W7f4ct622o/PAYqk7KHv6GS8NZULIjKY=
-modernc.org/libc v1.11.27/go.mod h1:zmWm6kcFXt/jpzeCgfvUNswM0qke8qVwxqZrnddlDiE=
-modernc.org/libc v1.11.28/go.mod h1:Ii4V0fTFcbq3qrv3CNn+OGHAvzqMBvC7dBNyC4vHZlg=
-modernc.org/libc v1.11.31/go.mod h1:FpBncUkEAtopRNJj8aRo29qUiyx5AvAlAxzlx9GNaVM=
-modernc.org/libc v1.11.34/go.mod h1:+Tzc4hnb1iaX/SKAutJmfzES6awxfU1BPvrrJO0pYLg=
-modernc.org/libc v1.11.37/go.mod h1:dCQebOwoO1046yTrfUE5nX1f3YpGZQKNcITUYWlrAWo=
-modernc.org/libc v1.11.39/go.mod h1:mV8lJMo2S5A31uD0k1cMu7vrJbSA3J3waQJxpV4iqx8=
-modernc.org/libc v1.11.42/go.mod h1:yzrLDU+sSjLE+D4bIhS7q1L5UwXDOw99PLSX0BlZvSQ=
-modernc.org/libc v1.11.44/go.mod h1:KFq33jsma7F5WXiYelU8quMJasCCTnHK0mkri4yPHgA=
-modernc.org/libc v1.11.45/go.mod h1:Y192orvfVQQYFzCNsn+Xt0Hxt4DiO4USpLNXBlXg/tM=
-modernc.org/libc v1.11.47/go.mod h1:tPkE4PzCTW27E6AIKIR5IwHAQKCAtudEIeAV1/SiyBg=
-modernc.org/libc v1.11.49/go.mod h1:9JrJuK5WTtoTWIFQ7QjX2Mb/bagYdZdscI3xrvHbXjE=
-modernc.org/libc v1.11.51/go.mod h1:R9I8u9TS+meaWLdbfQhq2kFknTW0O3aw3kEMqDDxMaM=
-modernc.org/libc v1.11.53/go.mod h1:5ip5vWYPAoMulkQ5XlSJTy12Sz5U6blOQiYasilVPsU=
-modernc.org/libc v1.11.54/go.mod h1:S/FVnskbzVUrjfBqlGFIPA5m7UwB3n9fojHhCNfSsnw=
-modernc.org/libc v1.11.55/go.mod h1:j2A5YBRm6HjNkoSs/fzZrSxCuwWqcMYTDPLNx0URn3M=
-modernc.org/libc v1.11.56/go.mod h1:pakHkg5JdMLt2OgRadpPOTnyRXm/uzu+Yyg/LSLdi18=
-modernc.org/libc v1.11.58/go.mod h1:ns94Rxv0OWyoQrDqMFfWwka2BcaF6/61CqJRK9LP7S8=
-modernc.org/libc v1.11.71/go.mod h1:DUOmMYe+IvKi9n6Mycyx3DbjfzSKrdr/0Vgt3j7P5gw=
-modernc.org/libc v1.11.75/go.mod h1:dGRVugT6edz361wmD9gk6ax1AbDSe0x5vji0dGJiPT0=
-modernc.org/libc v1.11.82/go.mod h1:NF+Ek1BOl2jeC7lw3a7Jj5PWyHPwWD4aq3wVKxqV1fI=
-modernc.org/libc v1.11.86/go.mod h1:ePuYgoQLmvxdNT06RpGnaDKJmDNEkV7ZPKI2jnsvZoE=
-modernc.org/libc v1.11.87/go.mod h1:Qvd5iXTeLhI5PS0XSyqMY99282y+3euapQFxM7jYnpY=
-modernc.org/libc v1.11.88/go.mod h1:h3oIVe8dxmTcchcFuCcJ4nAWaoiwzKCdv82MM0oiIdQ=
-modernc.org/libc v1.11.90/go.mod h1:ynK5sbjsU77AP+nn61+k+wxUGRx9rOFcIqWYYMaDZ4c=
-modernc.org/libc v1.11.98/go.mod h1:ynK5sbjsU77AP+nn61+k+wxUGRx9rOFcIqWYYMaDZ4c=
-modernc.org/libc v1.11.99/go.mod h1:wLLYgEiY2D17NbBOEp+mIJJJBGSiy7fLL4ZrGGZ+8jI=
-modernc.org/libc v1.11.101/go.mod h1:wLLYgEiY2D17NbBOEp+mIJJJBGSiy7fLL4ZrGGZ+8jI=
-modernc.org/libc v1.11.104/go.mod h1:2MH3DaF/gCU8i/UBiVE1VFRos4o523M7zipmwH8SIgQ=
-modernc.org/lldb v1.0.0/go.mod h1:jcRvJGWfCGodDZz8BPwiKMJxGJngQ/5DrRapkQnLob8=
-modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
-modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
-modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
-modernc.org/mathutil v1.4.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
-modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
-modernc.org/memory v1.0.4/go.mod h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc=
-modernc.org/memory v1.0.5/go.mod h1:B7OYswTRnfGg+4tDH1t1OeUNnsy2viGTdME4tzd+IjM=
-modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
-modernc.org/ql v1.0.0/go.mod h1:xGVyrLIatPcO2C1JvI/Co8c0sr6y91HKFNy4pt9JXEY=
-modernc.org/sortutil v1.1.0/go.mod h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k=
-modernc.org/sqlite v1.10.6/go.mod h1:Z9FEjUtZP4qFEg6/SiADg9XCER7aYy9a/j7Pg9P7CPs=
-modernc.org/sqlite v1.14.3/go.mod h1:xMpicS1i2MJ4C8+Ap0vYBqTwYfpFvdnPE6brbFOtV2Y=
-modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
-modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw=
-modernc.org/tcl v1.5.2/go.mod h1:pmJYOLgpiys3oI4AeAafkcUfE+TKKilminxNyU/+Zlo=
-modernc.org/tcl v1.9.2/go.mod h1:aw7OnlIoiuJgu1gwbTZtrKnGpDqH9wyH++jZcxdqNsg=
-modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
-modernc.org/z v1.0.1-0.20210308123920-1f282aa71362/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA=
-modernc.org/z v1.0.1/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA=
-modernc.org/z v1.2.20/go.mod h1:zU9FiF4PbHdOTUxw+IF8j7ArBMRPsHgq10uVPt6xTzo=
-modernc.org/zappy v1.0.0/go.mod h1:hHe+oGahLVII/aTTyWK/b53VDHMAGCBYYeZ9sn83HC4=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
diff --git a/mm-go-irckit/channel.go b/mm-go-irckit/channel.go
index 7d4c7bd1..bfabe66b 100644
--- a/mm-go-irckit/channel.go
+++ b/mm-go-irckit/channel.go
@@ -6,7 +6,7 @@ import (
"sync"
"time"
- "github.com/mattermost/mattermost-server/v6/model"
+ "github.com/mattermost/mattermost/server/public/model"
"github.com/muesli/reflow/wordwrap"
"github.com/sorcix/irc"
)
diff --git a/mm-go-irckit/service.go b/mm-go-irckit/service.go
index fd8f4b25..cbd818a1 100644
--- a/mm-go-irckit/service.go
+++ b/mm-go-irckit/service.go
@@ -10,7 +10,7 @@ import (
"unicode"
"github.com/42wim/matterircd/bridge"
- "github.com/mattermost/mattermost-server/v6/model"
+ "github.com/mattermost/mattermost/server/public/model"
)
type CommandHandler interface {
diff --git a/mm-go-irckit/userbridge.go b/mm-go-irckit/userbridge.go
index d7c4c97d..b193bb2e 100644
--- a/mm-go-irckit/userbridge.go
+++ b/mm-go-irckit/userbridge.go
@@ -20,7 +20,7 @@ import (
"github.com/42wim/matterircd/bridge/slack"
"github.com/alecthomas/chroma/v2/quick"
"github.com/davecgh/go-spew/spew"
- "github.com/mattermost/mattermost-server/v6/model"
+ "github.com/mattermost/mattermost/server/public/model"
"github.com/muesli/reflow/wordwrap"
"github.com/sorcix/irc"
"github.com/spf13/viper"
diff --git a/vendor/github.com/alecthomas/chroma/v2/.editorconfig b/vendor/github.com/alecthomas/chroma/v2/.editorconfig
index d80374e0..cfb2c669 100644
--- a/vendor/github.com/alecthomas/chroma/v2/.editorconfig
+++ b/vendor/github.com/alecthomas/chroma/v2/.editorconfig
@@ -11,3 +11,7 @@ insert_final_newline = true
indent_style = space
indent_size = 2
insert_final_newline = false
+
+[*.yml]
+indent_style = space
+indent_size = 2
diff --git a/vendor/github.com/alecthomas/chroma/v2/.golangci.yml b/vendor/github.com/alecthomas/chroma/v2/.golangci.yml
index 120ea715..668be374 100644
--- a/vendor/github.com/alecthomas/chroma/v2/.golangci.yml
+++ b/vendor/github.com/alecthomas/chroma/v2/.golangci.yml
@@ -49,6 +49,8 @@ linters:
- nosnakecase
- testableexamples
- musttag
+ - depguard
+ - goconst
linters-settings:
govet:
diff --git a/vendor/github.com/alecthomas/chroma/v2/README.md b/vendor/github.com/alecthomas/chroma/v2/README.md
index cc504eb5..775d3af2 100644
--- a/vendor/github.com/alecthomas/chroma/v2/README.md
+++ b/vendor/github.com/alecthomas/chroma/v2/README.md
@@ -8,75 +8,72 @@ highlighted HTML, ANSI-coloured text, etc.
Chroma is based heavily on [Pygments](http://pygments.org/), and includes
translators for Pygments lexers and styles.
-
-
## Table of Contents
-1. [Table of Contents](#table-of-contents)
-2. [Supported languages](#supported-languages)
-3. [Try it](#try-it)
-4. [Using the library](#using-the-library)
+1. [Supported languages](#supported-languages)
+2. [Try it](#try-it)
+3. [Using the library](#using-the-library)
1. [Quick start](#quick-start)
2. [Identifying the language](#identifying-the-language)
3. [Formatting the output](#formatting-the-output)
4. [The HTML formatter](#the-html-formatter)
-5. [More detail](#more-detail)
+4. [More detail](#more-detail)
1. [Lexers](#lexers)
2. [Formatters](#formatters)
3. [Styles](#styles)
-6. [Command-line interface](#command-line-interface)
-7. [Testing lexers](#testing-lexers)
-8. [What's missing compared to Pygments?](#whats-missing-compared-to-pygments)
+5. [Command-line interface](#command-line-interface)
+6. [Testing lexers](#testing-lexers)
+7. [What's missing compared to Pygments?](#whats-missing-compared-to-pygments)
-
-
## Supported languages
-| Prefix | Language |
-| :----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| A | ABAP, ABNF, ActionScript, ActionScript 3, Ada, AL, Angular2, ANTLR, ApacheConf, APL, AppleScript, ArangoDB AQL, Arduino, ArmAsm, AutoHotkey, AutoIt, Awk |
-| B | Ballerina, Bash, Bash Session, Batchfile, BibTeX, Bicep, BlitzBasic, BNF, BQN, Brainfuck |
-| C | C, C#, C++, Caddyfile, Caddyfile Directives, Cap'n Proto, Cassandra CQL, Ceylon, CFEngine3, cfstatement, ChaiScript, Chapel, Cheetah, Clojure, CMake, COBOL, CoffeeScript, Common Lisp, Coq, Crystal, CSS, Cython |
-| D | D, Dart, Diff, Django/Jinja, dns, Docker, DTD, Dylan |
-| E | EBNF, Elixir, Elm, EmacsLisp, Erlang |
-| F | Factor, Fennel, Fish, Forth, Fortran, FortranFixed, FSharp |
-| G | GAS, GDScript, Genshi, Genshi HTML, Genshi Text, Gherkin, GLSL, Gnuplot, Go, Go HTML Template, Go Text Template, GraphQL, Groff, Groovy |
-| H | Handlebars, Haskell, Haxe, HCL, Hexdump, HLB, HLSL, HolyC, HTML, HTTP, Hy |
-| I | Idris, Igor, INI, Io, ISCdhcpd |
-| J | J, Java, JavaScript, JSON, Julia, Jungle |
-| K | Kotlin |
-| L | Lighttpd configuration file, LLVM, Lua |
-| M | Makefile, Mako, markdown, Mason, Mathematica, Matlab, mcfunction, Meson, Metal, MiniZinc, MLIR, Modula-2, MonkeyC, MorrowindScript, Myghty, MySQL |
-| N | NASM, Natural, Newspeak, Nginx configuration file, Nim, Nix |
-| O | Objective-C, OCaml, Octave, Odin, OnesEnterprise, OpenEdge ABL, OpenSCAD, Org Mode |
-| P | PacmanConf, Perl, PHP, PHTML, Pig, PkgConfig, PL/pgSQL, plaintext, Plutus Core, Pony, PostgreSQL SQL dialect, PostScript, POVRay, PowerQuery, PowerShell, Prolog, PromQL, properties, Protocol Buffer, PSL, Puppet, Python, Python 2 |
-| Q | QBasic, QML |
-| R | R, Racket, Ragel, Raku, react, ReasonML, reg, reStructuredText, Rexx, Ruby, Rust |
-| S | SAS, Sass, Scala, Scheme, Scilab, SCSS, Sed, Sieve, Smali, Smalltalk, Smarty, Snobol, Solidity, SourcePawn, SPARQL, SQL, SquidConf, Standard ML, stas, Stylus, Svelte, Swift, SYSTEMD, systemverilog |
-| T | TableGen, Tal, TASM, Tcl, Tcsh, Termcap, Terminfo, Terraform, TeX, Thrift, TOML, TradingView, Transact-SQL, Turing, Turtle, Twig, TypeScript, TypoScript, TypoScriptCssData, TypoScriptHtmlData |
-| V | V, V shell, Vala, VB.net, verilog, VHDL, VHS, VimL, vue |
-| W | WDTE, WebGPU Shading Language, Whiley |
-| X | XML, Xorg |
-| Y | YAML, YANG |
-| Z | Z80 Assembly, Zed, Zig |
+| Prefix | Language |
+| :----: | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| A | ABAP, ABNF, ActionScript, ActionScript 3, Ada, Agda, AL, Alloy, Angular2, ANTLR, ApacheConf, APL, AppleScript, ArangoDB AQL, Arduino, ArmAsm, AutoHotkey, AutoIt, Awk |
+| B | Ballerina, Bash, Bash Session, Batchfile, BibTeX, Bicep, BlitzBasic, BNF, BQN, Brainfuck |
+| C | C, C#, C++, Caddyfile, Caddyfile Directives, Cap'n Proto, Cassandra CQL, Ceylon, CFEngine3, cfstatement, ChaiScript, Chapel, Cheetah, Clojure, CMake, COBOL, CoffeeScript, Common Lisp, Coq, Crystal, CSS, Cython |
+| D | D, Dart, Dax, Desktop Entry, Diff, Django/Jinja, dns, Docker, DTD, Dylan |
+| E | EBNF, Elixir, Elm, EmacsLisp, Erlang |
+| F | Factor, Fennel, Fish, Forth, Fortran, FortranFixed, FSharp |
+| G | GAS, GDScript, Genshi, Genshi HTML, Genshi Text, Gherkin, GLSL, Gnuplot, Go, Go HTML Template, Go Text Template, GraphQL, Groff, Groovy |
+| H | Handlebars, Hare, Haskell, Haxe, HCL, Hexdump, HLB, HLSL, HolyC, HTML, HTTP, Hy |
+| I | Idris, Igor, INI, Io, ISCdhcpd |
+| J | J, Java, JavaScript, JSON, Julia, Jungle |
+| K | Kotlin |
+| L | Lighttpd configuration file, LLVM, Lua |
+| M | Makefile, Mako, markdown, Mason, Materialize SQL dialect, Mathematica, Matlab, mcfunction, Meson, Metal, MiniZinc, MLIR, Modula-2, MonkeyC, MorrowindScript, Myghty, MySQL |
+| N | NASM, Natural, Newspeak, Nginx configuration file, Nim, Nix |
+| O | Objective-C, OCaml, Octave, Odin, OnesEnterprise, OpenEdge ABL, OpenSCAD, Org Mode |
+| P | PacmanConf, Perl, PHP, PHTML, Pig, PkgConfig, PL/pgSQL, plaintext, Plutus Core, Pony, PostgreSQL SQL dialect, PostScript, POVRay, PowerQuery, PowerShell, Prolog, PromQL, Promela, properties, Protocol Buffer, PRQL, PSL, Puppet, Python, Python 2 |
+| Q | QBasic, QML |
+| R | R, Racket, Ragel, Raku, react, ReasonML, reg, Rego, reStructuredText, Rexx, RPMSpec, Ruby, Rust |
+| S | SAS, Sass, Scala, Scheme, Scilab, SCSS, Sed, Sieve, Smali, Smalltalk, Smarty, Snobol, Solidity, SourcePawn, SPARQL, SQL, SquidConf, Standard ML, stas, Stylus, Svelte, Swift, SYSTEMD, systemverilog |
+| T | TableGen, Tal, TASM, Tcl, Tcsh, Termcap, Terminfo, Terraform, TeX, Thrift, TOML, TradingView, Transact-SQL, Turing, Turtle, Twig, TypeScript, TypoScript, TypoScriptCssData, TypoScriptHtmlData |
+| V | V, V shell, Vala, VB.net, verilog, VHDL, VHS, VimL, vue |
+| W | WDTE, WebGPU Shading Language, Whiley |
+| X | XML, Xorg |
+| Y | YAML, YANG |
+| Z | Z80 Assembly, Zed, Zig |
_I will attempt to keep this section up to date, but an authoritative list can be
displayed with `chroma --list`._
-
-
## Try it
Try out various languages and styles on the [Chroma Playground](https://swapoff.org/chroma/playground/).
-
-
## Using the library
+This is version 2 of Chroma, use the import path:
+
+```go
+import "github.com/alecthomas/chroma/v2"
+```
+
Chroma, like Pygments, has the concepts of
[lexers](https://github.com/alecthomas/chroma/tree/master/lexers),
[formatters](https://github.com/alecthomas/chroma/tree/master/formatters) and
@@ -95,8 +92,6 @@ In all cases, if a lexer, formatter or style can not be determined, `nil` will
be returned. In this situation you may want to default to the `Fallback`
value in each respective package, which provides sane defaults.
-
-
### Quick start
A convenience function exists that can be used to simply format some source
@@ -106,8 +101,6 @@ text, without any effort:
err := quick.Highlight(os.Stdout, someSourceCode, "go", "html", "monokai")
```
-
-
### Identifying the language
To highlight code, you'll first have to identify what language the code is
@@ -147,8 +140,6 @@ token types into a single token:
lexer = chroma.Coalesce(lexer)
```
-
-
### Formatting the output
Once a language is identified you will need to pick a formatter and a style (theme).
@@ -177,8 +168,6 @@ And finally, format the tokens from the iterator:
err := formatter.Format(w, style, iterator)
```
-
-
### The HTML formatter
By default the `html` registered formatter generates standalone HTML with
@@ -203,12 +192,8 @@ formatter := html.New(html.WithClasses(true))
err := formatter.WriteCSS(w, style)
```
-
-
## More detail
-
-
### Lexers
See the [Pygments documentation](http://pygments.org/docs/lexerdevelopment/)
@@ -228,8 +213,6 @@ python3 _tools/pygments2chroma_xml.py \
See notes in [pygments-lexers.txt](https://github.com/alecthomas/chroma/blob/master/pygments-lexers.txt)
for a list of lexers, and notes on some of the issues importing them.
-
-
### Formatters
Chroma supports HTML output, as well as terminal output in 8 colour, 256 colour, and true-colour.
@@ -237,8 +220,6 @@ Chroma supports HTML output, as well as terminal output in 8 colour, 256 colour,
A `noop` formatter is included that outputs the token text only, and a `tokens`
formatter outputs raw tokens. The latter is useful for debugging lexers.
-
-
### Styles
Chroma styles are defined in XML. The style entries use the
@@ -262,8 +243,6 @@ Also, token types in a style file are hierarchical. For instance, when `CommentS
For a quick overview of the available styles and how they look, check out the [Chroma Style Gallery](https://xyproto.github.io/splash/docs/).
-
-
## Command-line interface
A command-line interface to Chroma is included.
@@ -288,10 +267,6 @@ on under the hood for easy integration with [lesspipe shipping with
Debian and derivatives](https://manpages.debian.org/lesspipe#USER_DEFINED_FILTERS);
for that setup the `chroma` executable can be just symlinked to `~/.lessfilter`.
-
-
-
-
## Testing lexers
If you edit some lexers and want to try it, open a shell in `cmd/chromad` and run:
diff --git a/vendor/github.com/alecthomas/chroma/v2/formatters/html/html.go b/vendor/github.com/alecthomas/chroma/v2/formatters/html/html.go
index 0ad6b31c..92d784c2 100644
--- a/vendor/github.com/alecthomas/chroma/v2/formatters/html/html.go
+++ b/vendor/github.com/alecthomas/chroma/v2/formatters/html/html.go
@@ -5,7 +5,9 @@ import (
"html"
"io"
"sort"
+ "strconv"
"strings"
+ "sync"
"github.com/alecthomas/chroma/v2"
)
@@ -132,6 +134,7 @@ func New(options ...Option) *Formatter {
baseLineNumber: 1,
preWrapper: defaultPreWrapper,
}
+ f.styleCache = newStyleCache(f)
for _, option := range options {
option(f)
}
@@ -188,6 +191,7 @@ var (
// Formatter that generates HTML.
type Formatter struct {
+ styleCache *styleCache
standalone bool
prefix string
Classes bool // Exported field to detect when classes are being used
@@ -220,12 +224,7 @@ func (f *Formatter) Format(w io.Writer, style *chroma.Style, iterator chroma.Ite
//
// OTOH we need to be super careful about correct escaping...
func (f *Formatter) writeHTML(w io.Writer, style *chroma.Style, tokens []chroma.Token) (err error) { // nolint: gocyclo
- css := f.styleToCSS(style)
- if !f.Classes {
- for t, style := range css {
- css[t] = compressStyle(style)
- }
- }
+ css := f.styleCache.get(style, true)
if f.standalone {
fmt.Fprint(w, "\n")
if f.Classes {
@@ -243,7 +242,7 @@ func (f *Formatter) writeHTML(w io.Writer, style *chroma.Style, tokens []chroma.
wrapInTable := f.lineNumbers && f.lineNumbersInTable
lines := chroma.SplitTokensIntoLines(tokens)
- lineDigits := len(fmt.Sprintf("%d", f.baseLineNumber+len(lines)-1))
+ lineDigits := len(strconv.Itoa(f.baseLineNumber + len(lines) - 1))
highlightIndex := 0
if wrapInTable {
@@ -251,7 +250,7 @@ func (f *Formatter) writeHTML(w io.Writer, style *chroma.Style, tokens []chroma.
fmt.Fprintf(w, "
\n", f.styleAttr(css, chroma.PreWrapper))
fmt.Fprintf(w, "
", f.styleAttr(css, chroma.LineTable))
fmt.Fprintf(w, "\n", f.styleAttr(css, chroma.LineTableTD))
- fmt.Fprintf(w, f.preWrapper.Start(false, f.styleAttr(css, chroma.PreWrapper)))
+ fmt.Fprintf(w, "%s", f.preWrapper.Start(false, f.styleAttr(css, chroma.PreWrapper)))
for index := range lines {
line := f.baseLineNumber + index
highlight, next := f.shouldHighlight(highlightIndex, line)
@@ -273,7 +272,7 @@ func (f *Formatter) writeHTML(w io.Writer, style *chroma.Style, tokens []chroma.
fmt.Fprintf(w, " | \n", f.styleAttr(css, chroma.LineTableTD, "width:100%"))
}
- fmt.Fprintf(w, f.preWrapper.Start(true, f.styleAttr(css, chroma.PreWrapper)))
+ fmt.Fprintf(w, "%s", f.preWrapper.Start(true, f.styleAttr(css, chroma.PreWrapper)))
highlightIndex = 0
for index, tokens := range lines {
@@ -323,7 +322,7 @@ func (f *Formatter) writeHTML(w io.Writer, style *chroma.Style, tokens []chroma.
fmt.Fprint(w, `</span>`) // End of Line
}
}
- fmt.Fprintf(w, f.preWrapper.End(true))
+ fmt.Fprintf(w, "%s", f.preWrapper.End(true))
if wrapInTable {
fmt.Fprint(w, " |
\n")
@@ -419,7 +418,7 @@ func (f *Formatter) tabWidthStyle() string {
// WriteCSS writes CSS style definitions (without any surrounding HTML).
func (f *Formatter) WriteCSS(w io.Writer, style *chroma.Style) error {
- css := f.styleToCSS(style)
+ css := f.styleCache.get(style, false)
// Special-case background as it is mapped to the outer ".chroma" class.
if _, err := fmt.Fprintf(w, "/* %s */ .%sbg { %s }\n", chroma.Background, f.prefix, css[chroma.Background]); err != nil {
return err
@@ -562,3 +561,63 @@ func compressStyle(s string) string {
}
return strings.Join(out, ";")
}
+
+const styleCacheLimit = 32
+
+type styleCacheEntry struct {
+ style *chroma.Style
+ compressed bool
+ cache map[chroma.TokenType]string
+}
+
+type styleCache struct {
+ mu sync.Mutex
+ // LRU cache of compiled (and possibly compressed) styles. This is a slice
+ // because the cache size is small, and a slice is sufficiently fast for
+ // small N.
+ cache []styleCacheEntry
+ f *Formatter
+}
+
+func newStyleCache(f *Formatter) *styleCache {
+ return &styleCache{f: f}
+}
+
+func (l *styleCache) get(style *chroma.Style, compress bool) map[chroma.TokenType]string {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ // Look for an existing entry.
+ for i := len(l.cache) - 1; i >= 0; i-- {
+ entry := l.cache[i]
+ if entry.style == style && entry.compressed == compress {
+ // Top of the cache, no need to adjust the order.
+ if i == len(l.cache)-1 {
+ return entry.cache
+ }
+ // Move this entry to the end of the LRU
+ copy(l.cache[i:], l.cache[i+1:])
+ l.cache[len(l.cache)-1] = entry
+ return entry.cache
+ }
+ }
+
+ // No entry, create one.
+ cached := l.f.styleToCSS(style)
+ if !l.f.Classes {
+ for t, style := range cached {
+ cached[t] = compressStyle(style)
+ }
+ }
+ if compress {
+ for t, style := range cached {
+ cached[t] = compressStyle(style)
+ }
+ }
+ // Evict the oldest entry.
+ if len(l.cache) >= styleCacheLimit {
+ l.cache = l.cache[0:copy(l.cache, l.cache[1:])]
+ }
+ l.cache = append(l.cache, styleCacheEntry{style: style, cache: cached, compressed: compress})
+ return cached
+}
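The `styleCache` added above memoises the compiled CSS map per `*chroma.Style`, so repeated `Format` and `WriteCSS` calls with the same style no longer re-run `styleToCSS`. A rough usage sketch against chroma's public API (lexer and style names are arbitrary examples):

```go
package main

import (
	"os"

	"github.com/alecthomas/chroma/v2/formatters/html"
	"github.com/alecthomas/chroma/v2/lexers"
	"github.com/alecthomas/chroma/v2/styles"
)

func main() {
	formatter := html.New(html.WithClasses(true))
	lexer := lexers.Get("go")
	style := styles.Get("monokai")

	for _, src := range []string{"package a", "package b"} {
		it, err := lexer.Tokenise(nil, src)
		if err != nil {
			panic(err)
		}
		// The second iteration hits the formatter's style cache, so the CSS
		// for this style is only compiled (and compressed) once.
		if err := formatter.Format(os.Stdout, style, it); err != nil {
			panic(err)
		}
	}
}
```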
diff --git a/vendor/github.com/alecthomas/chroma/v2/formatters/tty_indexed.go b/vendor/github.com/alecthomas/chroma/v2/formatters/tty_indexed.go
index 2393f58d..d383d985 100644
--- a/vendor/github.com/alecthomas/chroma/v2/formatters/tty_indexed.go
+++ b/vendor/github.com/alecthomas/chroma/v2/formatters/tty_indexed.go
@@ -242,12 +242,21 @@ func (c *indexedTTYFormatter) Format(w io.Writer, style *chroma.Style, it chroma
theme := styleToEscapeSequence(c.table, style)
for token := it(); token != chroma.EOF; token = it() {
clr, ok := theme[token.Type]
+
+ // This search mimics how styles.Get() is used in tty_truecolour.go.
if !ok {
clr, ok = theme[token.Type.SubCategory()]
if !ok {
- clr = theme[token.Type.Category()]
+ clr, ok = theme[token.Type.Category()]
+ if !ok {
+ clr, ok = theme[chroma.Text]
+ if !ok {
+ clr = theme[chroma.Background]
+ }
+ }
}
}
+
if clr != "" {
fmt.Fprint(w, clr)
}
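The tty_indexed change extends the colour lookup so a token with no exact entry falls back through its sub-category, category, `chroma.Text`, and finally `chroma.Background`. A hypothetical helper (not part of chroma's API) showing the same fallback order:

```go
package main

import "github.com/alecthomas/chroma/v2"

// lookupColour mirrors the fallback chain used by the indexed TTY formatter:
// exact token type, sub-category, category, Text, then Background.
func lookupColour(theme map[chroma.TokenType]string, t chroma.TokenType) string {
	for _, key := range []chroma.TokenType{t, t.SubCategory(), t.Category(), chroma.Text, chroma.Background} {
		if clr, ok := theme[key]; ok {
			return clr
		}
	}
	return ""
}

func main() {
	theme := map[chroma.TokenType]string{chroma.Text: "\x1b[37m"}
	// CommentSingle has no entry of its own, so this resolves to the Text colour.
	_ = lookupColour(theme, chroma.CommentSingle)
}
```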
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/caddyfile.go b/vendor/github.com/alecthomas/chroma/v2/lexers/caddyfile.go
index 9100efa8..82a7efa4 100644
--- a/vendor/github.com/alecthomas/chroma/v2/lexers/caddyfile.go
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/caddyfile.go
@@ -4,52 +4,82 @@ import (
. "github.com/alecthomas/chroma/v2" // nolint
)
+// Matcher token stub for docs, or
+// Named matcher: @name, or
+// Path matcher: /foo, or
+// Wildcard path matcher: *
+// nolint: gosec
+var caddyfileMatcherTokenRegexp = `(\[\<matcher\>\]|@[^\s]+|/[^\s]+|\*)`
+
+// Comment at start of line, or
+// Comment preceded by whitespace
+var caddyfileCommentRegexp = `(^|\s+)#.*\n`
+
// caddyfileCommon are the rules common to both of the lexer variants
func caddyfileCommonRules() Rules {
return Rules{
"site_block_common": {
+ Include("site_body"),
+ // Any other directive
+ {`[^\s#]+`, Keyword, Push("directive")},
+ Include("base"),
+ },
+ "site_body": {
// Import keyword
- {`(import)(\s+)([^\s]+)`, ByGroups(Keyword, Text, NameVariableMagic), nil},
+ {`\b(import|invoke)\b( [^\s#]+)`, ByGroups(Keyword, Text), Push("subdirective")},
// Matcher definition
{`@[^\s]+(?=\s)`, NameDecorator, Push("matcher")},
// Matcher token stub for docs
{`\[\<matcher\>\]`, NameDecorator, Push("matcher")},
// These cannot have matchers but may have things that look like
// matchers in their arguments, so we just parse as a subdirective.
- {`try_files`, Keyword, Push("subdirective")},
+ {`\b(try_files|tls|log|bind)\b`, Keyword, Push("subdirective")},
// These are special, they can nest more directives
- {`handle_errors|handle|route|handle_path|not`, Keyword, Push("nested_directive")},
- // Any other directive
- {`[^\s#]+`, Keyword, Push("directive")},
- Include("base"),
+ {`\b(handle_errors|handle_path|handle_response|replace_status|handle|route)\b`, Keyword, Push("nested_directive")},
+ // uri directive has special syntax
+ {`\b(uri)\b`, Keyword, Push("uri_directive")},
},
"matcher": {
{`\{`, Punctuation, Push("block")},
// Not can be one-liner
{`not`, Keyword, Push("deep_not_matcher")},
+ // Heredoc for CEL expression
+ Include("heredoc"),
+ // Backtick for CEL expression
+ {"`", StringBacktick, Push("backticks")},
// Any other same-line matcher
{`[^\s#]+`, Keyword, Push("arguments")},
// Terminators
- {`\n`, Text, Pop(1)},
+ {`\s*\n`, Text, Pop(1)},
{`\}`, Punctuation, Pop(1)},
Include("base"),
},
"block": {
{`\}`, Punctuation, Pop(2)},
+ // Using double quotes doesn't stop at spaces
+ {`"`, StringDouble, Push("double_quotes")},
+ // Using backticks doesn't stop at spaces
+ {"`", StringBacktick, Push("backticks")},
// Not can be one-liner
{`not`, Keyword, Push("not_matcher")},
- // Any other subdirective
+ // Directives & matcher definitions
+ Include("site_body"),
+ // Any directive
{`[^\s#]+`, Keyword, Push("subdirective")},
Include("base"),
},
"nested_block": {
{`\}`, Punctuation, Pop(2)},
- // Matcher definition
- {`@[^\s]+(?=\s)`, NameDecorator, Push("matcher")},
- // Something that starts with literally < is probably a docs stub
- {`\<[^#]+\>`, Keyword, Push("nested_directive")},
- // Any other directive
- {`[^\s#]+`, Keyword, Push("nested_directive")},
+ // Using double quotes doesn't stop at spaces
+ {`"`, StringDouble, Push("double_quotes")},
+ // Using backticks doesn't stop at spaces
+ {"`", StringBacktick, Push("backticks")},
+ // Not can be one-liner
+ {`not`, Keyword, Push("not_matcher")},
+ // Directives & matcher definitions
+ Include("site_body"),
+ // Any other subdirective
+ {`[^\s#]+`, Keyword, Push("directive")},
Include("base"),
},
"not_matcher": {
@@ -66,69 +96,97 @@ func caddyfileCommonRules() Rules {
},
"directive": {
{`\{(?=\s)`, Punctuation, Push("block")},
- Include("matcher_token"),
- Include("comments_pop_1"),
- {`\n`, Text, Pop(1)},
+ {caddyfileMatcherTokenRegexp, NameDecorator, Push("arguments")},
+ {caddyfileCommentRegexp, CommentSingle, Pop(1)},
+ {`\s*\n`, Text, Pop(1)},
Include("base"),
},
"nested_directive": {
{`\{(?=\s)`, Punctuation, Push("nested_block")},
- Include("matcher_token"),
- Include("comments_pop_1"),
- {`\n`, Text, Pop(1)},
+ {caddyfileMatcherTokenRegexp, NameDecorator, Push("nested_arguments")},
+ {caddyfileCommentRegexp, CommentSingle, Pop(1)},
+ {`\s*\n`, Text, Pop(1)},
Include("base"),
},
"subdirective": {
{`\{(?=\s)`, Punctuation, Push("block")},
- Include("comments_pop_1"),
- {`\n`, Text, Pop(1)},
+ {caddyfileCommentRegexp, CommentSingle, Pop(1)},
+ {`\s*\n`, Text, Pop(1)},
Include("base"),
},
"arguments": {
{`\{(?=\s)`, Punctuation, Push("block")},
- Include("comments_pop_2"),
+ {caddyfileCommentRegexp, CommentSingle, Pop(2)},
{`\\\n`, Text, nil}, // Skip escaped newlines
- {`\n`, Text, Pop(2)},
+ {`\s*\n`, Text, Pop(2)},
+ Include("base"),
+ },
+ "nested_arguments": {
+ {`\{(?=\s)`, Punctuation, Push("nested_block")},
+ {caddyfileCommentRegexp, CommentSingle, Pop(2)},
+ {`\\\n`, Text, nil}, // Skip escaped newlines
+ {`\s*\n`, Text, Pop(2)},
Include("base"),
},
"deep_subdirective": {
{`\{(?=\s)`, Punctuation, Push("block")},
- Include("comments_pop_3"),
- {`\n`, Text, Pop(3)},
+ {caddyfileCommentRegexp, CommentSingle, Pop(3)},
+ {`\s*\n`, Text, Pop(3)},
+ Include("base"),
+ },
+ "uri_directive": {
+ {`\{(?=\s)`, Punctuation, Push("block")},
+ {caddyfileMatcherTokenRegexp, NameDecorator, nil},
+ {`(strip_prefix|strip_suffix|replace|path_regexp)`, NameConstant, Push("arguments")},
+ {caddyfileCommentRegexp, CommentSingle, Pop(1)},
+ {`\s*\n`, Text, Pop(1)},
Include("base"),
},
- "matcher_token": {
- {`@[^\s]+`, NameDecorator, Push("arguments")}, // Named matcher
- {`/[^\s]+`, NameDecorator, Push("arguments")}, // Path matcher
- {`\*`, NameDecorator, Push("arguments")}, // Wildcard path matcher
- {`\[\<matcher\>\]`, NameDecorator, Push("arguments")}, // Matcher token stub for docs
+ "double_quotes": {
+ Include("placeholder"),
+ {`\\"`, StringDouble, nil},
+ {`[^"]`, StringDouble, nil},
+ {`"`, StringDouble, Pop(1)},
},
- "comments": {
- {`^#.*\n`, CommentSingle, nil}, // Comment at start of line
- {`\s+#.*\n`, CommentSingle, nil}, // Comment preceded by whitespace
+ "backticks": {
+ Include("placeholder"),
+ {"\\\\`", StringBacktick, nil},
+ {"[^`]", StringBacktick, nil},
+ {"`", StringBacktick, Pop(1)},
},
- "comments_pop_1": {
- {`^#.*\n`, CommentSingle, Pop(1)}, // Comment at start of line
- {`\s+#.*\n`, CommentSingle, Pop(1)}, // Comment preceded by whitespace
+ "optional": {
+ // Docs syntax for showing optional parts with [ ]
+ {`\[`, Punctuation, Push("optional")},
+ Include("name_constants"),
+ {`\|`, Punctuation, nil},
+ {`[^\[\]\|]+`, String, nil},
+ {`\]`, Punctuation, Pop(1)},
},
- "comments_pop_2": {
- {`^#.*\n`, CommentSingle, Pop(2)}, // Comment at start of line
- {`\s+#.*\n`, CommentSingle, Pop(2)}, // Comment preceded by whitespace
+ "heredoc": {
+ {`(<<([a-zA-Z0-9_-]+))(\n(.*|\n)*)(\s*)(\2)`, ByGroups(StringHeredoc, nil, String, String, String, StringHeredoc), nil},
},
- "comments_pop_3": {
- {`^#.*\n`, CommentSingle, Pop(3)}, // Comment at start of line
- {`\s+#.*\n`, CommentSingle, Pop(3)}, // Comment preceded by whitespace
+ "name_constants": {
+ {`\b(most_recently_modified|largest_size|smallest_size|first_exist|internal|disable_redirects|ignore_loaded_certs|disable_certs|private_ranges|first|last|before|after|on|off)\b(\||(?=\]|\s|$))`, ByGroups(NameConstant, Punctuation), nil},
+ },
+ "placeholder": {
+ // Placeholder with dots, colon for default value, brackets for args[0:]
+ {`\{[\w+.\[\]\:\$-]+\}`, StringEscape, nil},
+ // Handle opening brackets with no matching closing one
+ {`\{[^\}\s]*\b`, String, nil},
},
"base": {
- Include("comments"),
- {`(on|off|first|last|before|after|internal|strip_prefix|strip_suffix|replace)\b`, NameConstant, nil},
- {`(https?://)?([a-z0-9.-]+)(:)([0-9]+)`, ByGroups(Name, Name, Punctuation, LiteralNumberInteger), nil},
- {`[a-z-]+/[a-z-+]+`, LiteralString, nil},
- {`[0-9]+[km]?\b`, LiteralNumberInteger, nil},
- {`\{[\w+.\$-]+\}`, LiteralStringEscape, nil}, // Placeholder
- {`\[(?=[^#{}$]+\])`, Punctuation, nil},
- {`\]|\|`, Punctuation, nil},
- {`[^\s#{}$\]]+`, LiteralString, nil},
+ {caddyfileCommentRegexp, CommentSingle, nil},
+ {`\[\<matcher\>\]`, NameDecorator, nil},
+ Include("name_constants"),
+ Include("heredoc"),
+ {`(https?://)?([a-z0-9.-]+)(:)([0-9]+)([^\s]*)`, ByGroups(Name, Name, Punctuation, NumberInteger, Name), nil},
+ {`\[`, Punctuation, Push("optional")},
+ {"`", StringBacktick, Push("backticks")},
+ {`"`, StringDouble, Push("double_quotes")},
+ Include("placeholder"),
+ {`[a-z-]+/[a-z-+]+`, String, nil},
+ {`[0-9]+([smhdk]|ns|us|µs|ms)?\b`, NumberInteger, nil},
+ {`[^\s\n#\{]+`, String, nil},
{`/[^\s#]*`, Name, nil},
{`\s+`, Text, nil},
},
@@ -149,27 +207,29 @@ var Caddyfile = Register(MustNewLexer(
func caddyfileRules() Rules {
return Rules{
"root": {
- Include("comments"),
+ {caddyfileCommentRegexp, CommentSingle, nil},
// Global options block
{`^\s*(\{)\s*$`, ByGroups(Punctuation), Push("globals")},
+ // Top level import
+ {`(import)(\s+)([^\s]+)`, ByGroups(Keyword, Text, NameVariableMagic), nil},
// Snippets
- {`(\([^\s#]+\))(\s*)(\{)`, ByGroups(NameVariableAnonymous, Text, Punctuation), Push("snippet")},
+ {`(&?\([^\s#]+\))(\s*)(\{)`, ByGroups(NameVariableAnonymous, Text, Punctuation), Push("snippet")},
// Site label
{`[^#{(\s,]+`, GenericHeading, Push("label")},
// Site label with placeholder
- {`\{[\w+.\$-]+\}`, LiteralStringEscape, Push("label")},
+ {`\{[\w+.\[\]\:\$-]+\}`, StringEscape, Push("label")},
{`\s+`, Text, nil},
},
"globals": {
{`\}`, Punctuation, Pop(1)},
- {`[^\s#]+`, Keyword, Push("directive")},
+ // Global options are parsed as subdirectives (no matcher)
+ {`[^\s#]+`, Keyword, Push("subdirective")},
Include("base"),
},
"snippet": {
{`\}`, Punctuation, Pop(1)},
- // Matcher definition
- {`@[^\s]+(?=\s)`, NameDecorator, Push("matcher")},
- // Any directive
+ Include("site_body"),
+ // Any other directive
{`[^\s#]+`, Keyword, Push("directive")},
Include("base"),
},
@@ -179,7 +239,7 @@ func caddyfileRules() Rules {
{`,\s*\n?`, Text, nil},
{` `, Text, nil},
// Site label with placeholder
- {`\{[\w+.\$-]+\}`, LiteralStringEscape, nil},
+ Include("placeholder"),
// Site label
{`[^#{(\s,]+`, GenericHeading, nil},
// Comment after non-block label (hack because comments end in \n)
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/agda.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/agda.xml
new file mode 100644
index 00000000..6f2b2d50
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/agda.xml
@@ -0,0 +1,66 @@
+
+
+ Agda
+ agda
+ *.agda
+ text/x-agda
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/alloy.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/alloy.xml
new file mode 100644
index 00000000..1de9ea6c
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/alloy.xml
@@ -0,0 +1,58 @@
+
+
+
+ Alloy
+ alloy
+ *.als
+ text/x-alloy
+ true
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/bicep.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/bicep.xml
index 1efc9c8a..db90f31b 100644
--- a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/bicep.xml
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/bicep.xml
@@ -5,63 +5,79 @@
*.bicep
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
+
+
-
-
+
+
+
+
-
+
-
-
+
+
+
-
-
+
+
-
+
-
-
-
-
-
-
-
-
-
-
+
-
-
+
+
-
+
-
-
+
+
-
-
+
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/c#.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/c#.xml
index 801a9543..f1e21db0 100644
--- a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/c#.xml
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/c#.xml
@@ -19,10 +19,10 @@
-
+
-
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/cue.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/cue.xml
new file mode 100644
index 00000000..2a12f395
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/cue.xml
@@ -0,0 +1,85 @@
+
+
+ CUE
+ cue
+ *.cue
+ text/x-cue
+ true
+ true
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/dax.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/dax.xml
new file mode 100644
index 00000000..2bb3a1aa
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/dax.xml
@@ -0,0 +1,39 @@
+
+
+ Dax
+ dax
+ *.dax
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/desktop_entry.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/desktop_entry.xml
new file mode 100644
index 00000000..ad71ad47
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/desktop_entry.xml
@@ -0,0 +1,17 @@
+
+
+ Desktop file
+ desktop
+ desktop_entry
+ *.desktop
+ application/x-desktop
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/dns.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/dns.xml
index 5ac3c258..ef8f663f 100644
--- a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/dns.xml
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/dns.xml
@@ -27,7 +27,7 @@
-
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/docker.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/docker.xml
index 77c89376..a73c52cd 100644
--- a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/docker.xml
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/docker.xml
@@ -5,6 +5,7 @@
dockerfile
Dockerfile
Dockerfile.*
+ *.Dockerfile
*.docker
text/x-dockerfile-config
true
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/gdscript.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/gdscript.xml
index 350beac7..811f38d0 100644
--- a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/gdscript.xml
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/gdscript.xml
@@ -7,8 +7,8 @@
text/x-gdscript
application/x-gdscript
0.1
-
-
+
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/gdscript3.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/gdscript3.xml
index ea78bb7e..b50c9dd7 100644
--- a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/gdscript3.xml
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/gdscript3.xml
@@ -6,8 +6,10 @@
*.gd
text/x-gdscript
application/x-gdscript
-
-
+
+
+
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/gleam.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/gleam.xml
new file mode 100644
index 00000000..c706e962
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/gleam.xml
@@ -0,0 +1,117 @@
+
+
+ Gleam
+ gleam
+ *.gleam
+ text/x-gleam
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/go_template.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/go_template.xml
index 09009220..36f737b9 100644
--- a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/go_template.xml
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/go_template.xml
@@ -2,6 +2,8 @@
Go Template
go-template
+ *.gotmpl
+ *.go.tmpl
@@ -109,4 +111,4 @@
-
+
\ No newline at end of file
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/hare.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/hare.xml
new file mode 100644
index 00000000..ea636422
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/hare.xml
@@ -0,0 +1,98 @@
+
+
+ Hare
+ hare
+ *.ha
+ text/x-hare
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/haskell.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/haskell.xml
index 6dc6912e..5f805d6d 100644
--- a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/haskell.xml
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/haskell.xml
@@ -86,7 +86,7 @@
-
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/java.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/java.xml
index eabddee0..3ce33ff6 100644
--- a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/java.xml
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/java.xml
@@ -5,116 +5,188 @@
*.java
text/x-java
true
- true
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
-
+
-
-
+
+
+
+
+
-
+
-
-
+
+
-
-
-
-
+
+
+
+
-
+
-
-
+
+
-
+
-
-
+
+
-
+
-
+
+
+
+
+
-
+
-
-
+
+
-
+
-
-
+
+
-
+
-
-
+
+
+
-
-
+
+
+
-
-
+
+
-
-
+
+
+
+
+
+
+
+
+
-
-
+
+
+
+
+
+
-
+
-
-
+
+
-
+
-
+
-
+
-
+
-
-
+
+
+
+
+
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/json.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/json.xml
index bbe10b1b..3473cfd1 100644
--- a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/json.xml
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/json.xml
@@ -3,6 +3,7 @@
JSON
json
*.json
+ *.avsc
application/json
true
true
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/makefile.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/makefile.xml
index 81d18938..a82a7f8d 100644
--- a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/makefile.xml
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/makefile.xml
@@ -12,6 +12,9 @@
Makefile.*
GNUmakefile
BSDmakefile
+ Justfile
+ justfile
+ .justfile
text/x-makefile
true
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/materialize_sql_dialect.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/materialize_sql_dialect.xml
new file mode 100644
index 00000000..7b22a46c
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/materialize_sql_dialect.xml
@@ -0,0 +1,155 @@
+
+
+ Materialize SQL dialect
+ materialize
+ mzsql
+ text/x-materializesql
+ true
+ true
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 6
+ 12
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 12
+ 4
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/ndisasm.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/ndisasm.xml
new file mode 100644
index 00000000..74d443b6
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/ndisasm.xml
@@ -0,0 +1,123 @@
+
+
+ NDISASM
+ ndisasm
+ text/x-disasm
+ true
+ 0.5
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/nim.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/nim.xml
index 41c07cfb..bfdd6156 100644
--- a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/nim.xml
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/nim.xml
@@ -132,6 +132,10 @@
+
+
+
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/objectpascal.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/objectpascal.xml
new file mode 100644
index 00000000..12af64b9
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/objectpascal.xml
@@ -0,0 +1,145 @@
+
+
+ ObjectPascal
+ objectpascal
+ *.pas
+ *.pp
+ *.inc
+ *.dpr
+ *.dpk
+ *.lpr
+ *.lpk
+ text/x-pascal
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/org_mode.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/org_mode.xml
index 3f227ad8..259e54ef 100644
--- a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/org_mode.xml
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/org_mode.xml
@@ -228,42 +228,42 @@
-
+
-
+
-
+
-
+
-
+
-
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/promela.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/promela.xml
new file mode 100644
index 00000000..84558c3b
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/promela.xml
@@ -0,0 +1,119 @@
+
+
+
+ Promela
+ promela
+ *.pml
+ *.prom
+ *.prm
+ *.promela
+ *.pr
+ *.pm
+ text/x-promela
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/prql.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/prql.xml
new file mode 100644
index 00000000..21f21c65
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/prql.xml
@@ -0,0 +1,161 @@
+
+
+ PRQL
+ prql
+ *.prql
+ application/prql
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/python.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/python.xml
index 3c6af86e..a58686ff 100644
--- a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/python.xml
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/python.xml
@@ -19,6 +19,10 @@
BUILD
BUILD.bazel
WORKSPACE
+ WORKSPACE.bzlmod
+ WORKSPACE.bazel
+ MODULE.bazel
+ REPO.bazel
*.tac
text/x-python
application/x-python
@@ -586,4 +590,4 @@
-
\ No newline at end of file
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/r.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/r.xml
index 1e44ca4a..c1fba4e5 100644
--- a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/r.xml
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/r.xml
@@ -37,6 +37,9 @@
+
+
+
@@ -108,9 +111,6 @@
-
-
-
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/rego.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/rego.xml
new file mode 100644
index 00000000..517b7133
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/rego.xml
@@ -0,0 +1,94 @@
+
+
+ Rego
+ rego
+ *.rego
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/rpm_spec.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/rpm_spec.xml
new file mode 100644
index 00000000..8362772a
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/rpm_spec.xml
@@ -0,0 +1,58 @@
+
+
+
+ RPMSpec
+ spec
+ *.spec
+ text/x-rpm-spec
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/ruby.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/ruby.xml
index b47b1abe..baa7e435 100644
--- a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/ruby.xml
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/ruby.xml
@@ -12,6 +12,7 @@
*.rbx
*.duby
Gemfile
+ Vagrantfile
text/x-ruby
application/x-ruby
true
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/typescript.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/typescript.xml
index d49241e6..9828e92f 100644
--- a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/typescript.xml
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/typescript.xml
@@ -51,10 +51,13 @@
+
+
+
-
+
@@ -77,12 +80,25 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -200,7 +216,7 @@
-
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/ucode.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/ucode.xml
new file mode 100644
index 00000000..054fa891
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/ucode.xml
@@ -0,0 +1,147 @@
+
+
+ ucode
+ *.uc
+ application/x.ucode
+ text/x.ucode
+ true
+ true
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/vue.xml b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/vue.xml
index 75180207..ec350835 100644
--- a/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/vue.xml
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/embedded/vue.xml
@@ -83,9 +83,10 @@
-
+
+
@@ -104,9 +105,10 @@
-
+
+
@@ -258,14 +260,14 @@
-
+
-
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/go.go b/vendor/github.com/alecthomas/chroma/v2/lexers/go.go
index 77bc2259..266289b7 100644
--- a/vendor/github.com/alecthomas/chroma/v2/lexers/go.go
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/go.go
@@ -55,7 +55,7 @@ func goRules() Rules {
{`"(\\\\|\\"|[^"])*"`, LiteralString, nil},
{`(<<=|>>=|<<|>>|<=|>=|&\^=|&\^|\+=|-=|\*=|/=|%=|&=|\|=|&&|\|\||<-|\+\+|--|==|!=|:=|\.\.\.|[+\-*/%&])`, Operator, nil},
{`([a-zA-Z_]\w*)(\s*)(\()`, ByGroups(NameFunction, UsingSelf("root"), Punctuation), nil},
- {`[|^<>=!()\[\]{}.,;:]`, Punctuation, nil},
+ {`[|^<>=!()\[\]{}.,;:~]`, Punctuation, nil},
{`[^\W\d]\w*`, NameOther, nil},
},
}
diff --git a/vendor/github.com/alecthomas/chroma/v2/lexers/lexers.go b/vendor/github.com/alecthomas/chroma/v2/lexers/lexers.go
index 161caefe..4fa35ad2 100644
--- a/vendor/github.com/alecthomas/chroma/v2/lexers/lexers.go
+++ b/vendor/github.com/alecthomas/chroma/v2/lexers/lexers.go
@@ -30,6 +30,9 @@ func Names(withAliases bool) []string {
}
// Get a Lexer by name, alias or file extension.
+//
+// Note that if there isn't an exact match on name or alias, this will
+// call Match(), so it is not efficient.
func Get(name string) chroma.Lexer {
return GlobalLexerRegistry.Get(name)
}
@@ -40,6 +43,9 @@ func MatchMimeType(mimeType string) chroma.Lexer {
}
// Match returns the first lexer matching filename.
+//
+// Note that this iterates over all file patterns in all lexers, so it's not
+// particularly efficient.
func Match(filename string) chroma.Lexer {
return GlobalLexerRegistry.Match(filename)
}
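Usage sketch (illustrative, not part of the patch; the cache map and helper name are assumptions) of why the doc comments above matter: both Get and Match can fall back to scanning every registered lexer's filename patterns, so callers on hot paths may want to memoise the lookup:

```go
package main

import (
	"fmt"
	"sync"

	"github.com/alecthomas/chroma/v2"
	"github.com/alecthomas/chroma/v2/lexers"
)

var (
	mu    sync.Mutex
	cache = map[string]chroma.Lexer{} // filename -> lexer, avoids repeated Match() scans
)

func lexerFor(filename string) chroma.Lexer {
	mu.Lock()
	defer mu.Unlock()
	if l, ok := cache[filename]; ok {
		return l
	}
	l := lexers.Match(filename) // iterates all file patterns in all lexers
	if l == nil {
		l = lexers.Fallback
	}
	cache[filename] = l
	return l
}

func main() {
	fmt.Println(lexerFor("Justfile").Config().Name)
}
```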
diff --git a/vendor/github.com/alecthomas/chroma/v2/registry.go b/vendor/github.com/alecthomas/chroma/v2/registry.go
index 55ed3623..4742e8c5 100644
--- a/vendor/github.com/alecthomas/chroma/v2/registry.go
+++ b/vendor/github.com/alecthomas/chroma/v2/registry.go
@@ -97,6 +97,8 @@ func (l *LexerRegistry) MatchMimeType(mimeType string) Lexer {
}
// Match returns the first lexer matching filename.
+//
+// Note that this iterates over all file patterns in all lexers, so is not fast.
func (l *LexerRegistry) Match(filename string) Lexer {
filename = filepath.Base(filename)
matched := PrioritisedLexers{}
diff --git a/vendor/github.com/alecthomas/chroma/v2/renovate.json5 b/vendor/github.com/alecthomas/chroma/v2/renovate.json5
new file mode 100644
index 00000000..77c7b016
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/renovate.json5
@@ -0,0 +1,18 @@
+{
+ $schema: "https://docs.renovatebot.com/renovate-schema.json",
+ extends: [
+ "config:recommended",
+ ":semanticCommits",
+ ":semanticCommitTypeAll(chore)",
+ ":semanticCommitScope(deps)",
+ "group:allNonMajor",
+ "schedule:earlyMondays", // Run once a week.
+ ],
+ packageRules: [
+ {
+ matchPackageNames: ["golangci-lint"],
+ matchManagers: ["hermit"],
+ enabled: false,
+ },
+ ],
+}
diff --git a/vendor/github.com/alecthomas/chroma/v2/styles/catppuccin-frappe.xml b/vendor/github.com/alecthomas/chroma/v2/styles/catppuccin-frappe.xml
index 08eb42a6..0adf1ba9 100644
--- a/vendor/github.com/alecthomas/chroma/v2/styles/catppuccin-frappe.xml
+++ b/vendor/github.com/alecthomas/chroma/v2/styles/catppuccin-frappe.xml
@@ -5,7 +5,7 @@
-
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/styles/catppuccin-latte.xml b/vendor/github.com/alecthomas/chroma/v2/styles/catppuccin-latte.xml
index 3d510743..3ea767fd 100644
--- a/vendor/github.com/alecthomas/chroma/v2/styles/catppuccin-latte.xml
+++ b/vendor/github.com/alecthomas/chroma/v2/styles/catppuccin-latte.xml
@@ -5,7 +5,7 @@
-
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/styles/catppuccin-macchiato.xml b/vendor/github.com/alecthomas/chroma/v2/styles/catppuccin-macchiato.xml
index 5d96f599..6b500284 100644
--- a/vendor/github.com/alecthomas/chroma/v2/styles/catppuccin-macchiato.xml
+++ b/vendor/github.com/alecthomas/chroma/v2/styles/catppuccin-macchiato.xml
@@ -5,7 +5,7 @@
-
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/styles/catppuccin-mocha.xml b/vendor/github.com/alecthomas/chroma/v2/styles/catppuccin-mocha.xml
index e17866dd..9a401912 100644
--- a/vendor/github.com/alecthomas/chroma/v2/styles/catppuccin-mocha.xml
+++ b/vendor/github.com/alecthomas/chroma/v2/styles/catppuccin-mocha.xml
@@ -5,7 +5,7 @@
-
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/styles/github-dark.xml b/vendor/github.com/alecthomas/chroma/v2/styles/github-dark.xml
index 0adb7756..711aeafc 100644
--- a/vendor/github.com/alecthomas/chroma/v2/styles/github-dark.xml
+++ b/vendor/github.com/alecthomas/chroma/v2/styles/github-dark.xml
@@ -1,6 +1,6 @@
+
\ No newline at end of file
diff --git a/vendor/github.com/alecthomas/chroma/v2/styles/tokyonight-day.xml b/vendor/github.com/alecthomas/chroma/v2/styles/tokyonight-day.xml
new file mode 100644
index 00000000..c20d9a41
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/styles/tokyonight-day.xml
@@ -0,0 +1,83 @@
+
\ No newline at end of file
diff --git a/vendor/github.com/alecthomas/chroma/v2/styles/tokyonight-moon.xml b/vendor/github.com/alecthomas/chroma/v2/styles/tokyonight-moon.xml
new file mode 100644
index 00000000..3312f029
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/styles/tokyonight-moon.xml
@@ -0,0 +1,83 @@
+
diff --git a/vendor/github.com/alecthomas/chroma/v2/styles/tokyonight-night.xml b/vendor/github.com/alecthomas/chroma/v2/styles/tokyonight-night.xml
new file mode 100644
index 00000000..c798bad4
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/styles/tokyonight-night.xml
@@ -0,0 +1,83 @@
+
\ No newline at end of file
diff --git a/vendor/github.com/alecthomas/chroma/v2/styles/tokyonight-storm.xml b/vendor/github.com/alecthomas/chroma/v2/styles/tokyonight-storm.xml
new file mode 100644
index 00000000..c0811524
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/styles/tokyonight-storm.xml
@@ -0,0 +1,83 @@
+
\ No newline at end of file
diff --git a/vendor/github.com/blang/semver/.travis.yml b/vendor/github.com/blang/semver/.travis.yml
deleted file mode 100644
index 102fb9a6..00000000
--- a/vendor/github.com/blang/semver/.travis.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-language: go
-matrix:
- include:
- - go: 1.4.3
- - go: 1.5.4
- - go: 1.6.3
- - go: 1.7
- - go: tip
- allow_failures:
- - go: tip
-install:
-- go get golang.org/x/tools/cmd/cover
-- go get github.com/mattn/goveralls
-script:
-- echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci
- -repotoken $COVERALLS_TOKEN
-- echo "Build examples" ; cd examples && go build
-- echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .)
-env:
- global:
- secure: HroGEAUQpVq9zX1b1VIkraLiywhGbzvNnTZq2TMxgK7JHP8xqNplAeF1izrR2i4QLL9nsY+9WtYss4QuPvEtZcVHUobw6XnL6radF7jS1LgfYZ9Y7oF+zogZ2I5QUMRLGA7rcxQ05s7mKq3XZQfeqaNts4bms/eZRefWuaFZbkw=
diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md
deleted file mode 100644
index 08b2e4a3..00000000
--- a/vendor/github.com/blang/semver/README.md
+++ /dev/null
@@ -1,194 +0,0 @@
-semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master)
-======
-
-semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`.
-
-Usage
------
-```bash
-$ go get github.com/blang/semver
-```
-Note: Always vendor your dependencies or fix on a specific version tag.
-
-```go
-import github.com/blang/semver
-v1, err := semver.Make("1.0.0-beta")
-v2, err := semver.Make("2.0.0-beta")
-v1.Compare(v2)
-```
-
-Also check the [GoDocs](http://godoc.org/github.com/blang/semver).
-
-Why should I use this lib?
------
-
-- Fully spec compatible
-- No reflection
-- No regex
-- Fully tested (Coverage >99%)
-- Readable parsing/validation errors
-- Fast (See [Benchmarks](#benchmarks))
-- Only Stdlib
-- Uses values instead of pointers
-- Many features, see below
-
-
-Features
------
-
-- Parsing and validation at all levels
-- Comparator-like comparisons
-- Compare Helper Methods
-- InPlace manipulation
-- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1`
-- Wildcards `>=1.x`, `<=2.5.x`
-- Sortable (implements sort.Interface)
-- database/sql compatible (sql.Scanner/Valuer)
-- encoding/json compatible (json.Marshaler/Unmarshaler)
-
-Ranges
-------
-
-A `Range` is a set of conditions which specify which versions satisfy the range.
-
-A condition is composed of an operator and a version. The supported operators are:
-
-- `<1.0.0` Less than `1.0.0`
-- `<=1.0.0` Less than or equal to `1.0.0`
-- `>1.0.0` Greater than `1.0.0`
-- `>=1.0.0` Greater than or equal to `1.0.0`
-- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0`
-- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`.
-
-Note that spaces between the operator and the version will be gracefully tolerated.
-
-A `Range` can link multiple `Ranges` separated by space:
-
-Ranges can be linked by logical AND:
-
- - `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0`
- - `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2`
-
-Ranges can also be linked by logical OR:
-
- - `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x`
-
-AND has a higher precedence than OR. It's not possible to use brackets.
-
-Ranges can be combined by both AND and OR
-
- - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
-
-Range usage:
-
-```
-v, err := semver.Parse("1.2.3")
-range, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0")
-if range(v) {
- //valid
-}
-
-```
-
-Example
------
-
-Have a look at full examples in [examples/main.go](examples/main.go)
-
-```go
-import github.com/blang/semver
-
-v, err := semver.Make("0.0.1-alpha.preview+123.github")
-fmt.Printf("Major: %d\n", v.Major)
-fmt.Printf("Minor: %d\n", v.Minor)
-fmt.Printf("Patch: %d\n", v.Patch)
-fmt.Printf("Pre: %s\n", v.Pre)
-fmt.Printf("Build: %s\n", v.Build)
-
-// Prerelease versions array
-if len(v.Pre) > 0 {
- fmt.Println("Prerelease versions:")
- for i, pre := range v.Pre {
- fmt.Printf("%d: %q\n", i, pre)
- }
-}
-
-// Build meta data array
-if len(v.Build) > 0 {
- fmt.Println("Build meta data:")
- for i, build := range v.Build {
- fmt.Printf("%d: %q\n", i, build)
- }
-}
-
-v001, err := semver.Make("0.0.1")
-// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE
-v001.GT(v) == true
-v.LT(v001) == true
-v.GTE(v) == true
-v.LTE(v) == true
-
-// Or use v.Compare(v2) for comparisons (-1, 0, 1):
-v001.Compare(v) == 1
-v.Compare(v001) == -1
-v.Compare(v) == 0
-
-// Manipulate Version in place:
-v.Pre[0], err = semver.NewPRVersion("beta")
-if err != nil {
- fmt.Printf("Error parsing pre release version: %q", err)
-}
-
-fmt.Println("\nValidate versions:")
-v.Build[0] = "?"
-
-err = v.Validate()
-if err != nil {
- fmt.Printf("Validation failed: %s\n", err)
-}
-```
-
-
-Benchmarks
------
-
- BenchmarkParseSimple-4 5000000 390 ns/op 48 B/op 1 allocs/op
- BenchmarkParseComplex-4 1000000 1813 ns/op 256 B/op 7 allocs/op
- BenchmarkParseAverage-4 1000000 1171 ns/op 163 B/op 4 allocs/op
- BenchmarkStringSimple-4 20000000 119 ns/op 16 B/op 1 allocs/op
- BenchmarkStringLarger-4 10000000 206 ns/op 32 B/op 2 allocs/op
- BenchmarkStringComplex-4 5000000 324 ns/op 80 B/op 3 allocs/op
- BenchmarkStringAverage-4 5000000 273 ns/op 53 B/op 2 allocs/op
- BenchmarkValidateSimple-4 200000000 9.33 ns/op 0 B/op 0 allocs/op
- BenchmarkValidateComplex-4 3000000 469 ns/op 0 B/op 0 allocs/op
- BenchmarkValidateAverage-4 5000000 256 ns/op 0 B/op 0 allocs/op
- BenchmarkCompareSimple-4 100000000 11.8 ns/op 0 B/op 0 allocs/op
- BenchmarkCompareComplex-4 50000000 30.8 ns/op 0 B/op 0 allocs/op
- BenchmarkCompareAverage-4 30000000 41.5 ns/op 0 B/op 0 allocs/op
- BenchmarkSort-4 3000000 419 ns/op 256 B/op 2 allocs/op
- BenchmarkRangeParseSimple-4 2000000 850 ns/op 192 B/op 5 allocs/op
- BenchmarkRangeParseAverage-4 1000000 1677 ns/op 400 B/op 10 allocs/op
- BenchmarkRangeParseComplex-4 300000 5214 ns/op 1440 B/op 30 allocs/op
- BenchmarkRangeMatchSimple-4 50000000 25.6 ns/op 0 B/op 0 allocs/op
- BenchmarkRangeMatchAverage-4 30000000 56.4 ns/op 0 B/op 0 allocs/op
- BenchmarkRangeMatchComplex-4 10000000 153 ns/op 0 B/op 0 allocs/op
-
-See benchmark cases at [semver_test.go](semver_test.go)
-
-
-Motivation
------
-
-I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex which i don't like.
-
-
-Contribution
------
-
-Feel free to make a pull request. For bigger changes create a issue first to discuss about it.
-
-
-License
------
-
-See [LICENSE](LICENSE) file.
diff --git a/vendor/github.com/blang/semver/package.json b/vendor/github.com/blang/semver/package.json
deleted file mode 100644
index 1cf8ebdd..00000000
--- a/vendor/github.com/blang/semver/package.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "author": "blang",
- "bugs": {
- "URL": "https://github.com/blang/semver/issues",
- "url": "https://github.com/blang/semver/issues"
- },
- "gx": {
- "dvcsimport": "github.com/blang/semver"
- },
- "gxVersion": "0.10.0",
- "language": "go",
- "license": "MIT",
- "name": "semver",
- "releaseCmd": "git commit -a -m \"gx publish $VERSION\"",
- "version": "3.5.1"
-}
-
diff --git a/vendor/github.com/blang/semver/range.go b/vendor/github.com/blang/semver/range.go
deleted file mode 100644
index fca406d4..00000000
--- a/vendor/github.com/blang/semver/range.go
+++ /dev/null
@@ -1,416 +0,0 @@
-package semver
-
-import (
- "fmt"
- "strconv"
- "strings"
- "unicode"
-)
-
-type wildcardType int
-
-const (
- noneWildcard wildcardType = iota
- majorWildcard wildcardType = 1
- minorWildcard wildcardType = 2
- patchWildcard wildcardType = 3
-)
-
-func wildcardTypefromInt(i int) wildcardType {
- switch i {
- case 1:
- return majorWildcard
- case 2:
- return minorWildcard
- case 3:
- return patchWildcard
- default:
- return noneWildcard
- }
-}
-
-type comparator func(Version, Version) bool
-
-var (
- compEQ comparator = func(v1 Version, v2 Version) bool {
- return v1.Compare(v2) == 0
- }
- compNE = func(v1 Version, v2 Version) bool {
- return v1.Compare(v2) != 0
- }
- compGT = func(v1 Version, v2 Version) bool {
- return v1.Compare(v2) == 1
- }
- compGE = func(v1 Version, v2 Version) bool {
- return v1.Compare(v2) >= 0
- }
- compLT = func(v1 Version, v2 Version) bool {
- return v1.Compare(v2) == -1
- }
- compLE = func(v1 Version, v2 Version) bool {
- return v1.Compare(v2) <= 0
- }
-)
-
-type versionRange struct {
- v Version
- c comparator
-}
-
-// rangeFunc creates a Range from the given versionRange.
-func (vr *versionRange) rangeFunc() Range {
- return Range(func(v Version) bool {
- return vr.c(v, vr.v)
- })
-}
-
-// Range represents a range of versions.
-// A Range can be used to check if a Version satisfies it:
-//
-// range, err := semver.ParseRange(">1.0.0 <2.0.0")
-// range(semver.MustParse("1.1.1") // returns true
-type Range func(Version) bool
-
-// OR combines the existing Range with another Range using logical OR.
-func (rf Range) OR(f Range) Range {
- return Range(func(v Version) bool {
- return rf(v) || f(v)
- })
-}
-
-// AND combines the existing Range with another Range using logical AND.
-func (rf Range) AND(f Range) Range {
- return Range(func(v Version) bool {
- return rf(v) && f(v)
- })
-}
-
-// ParseRange parses a range and returns a Range.
-// If the range could not be parsed an error is returned.
-//
-// Valid ranges are:
-// - "<1.0.0"
-// - "<=1.0.0"
-// - ">1.0.0"
-// - ">=1.0.0"
-// - "1.0.0", "=1.0.0", "==1.0.0"
-// - "!1.0.0", "!=1.0.0"
-//
-// A Range can consist of multiple ranges separated by space:
-// Ranges can be linked by logical AND:
-// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0"
-// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2
-//
-// Ranges can also be linked by logical OR:
-// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x"
-//
-// AND has a higher precedence than OR. It's not possible to use brackets.
-//
-// Ranges can be combined by both AND and OR
-//
-// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
-func ParseRange(s string) (Range, error) {
- parts := splitAndTrim(s)
- orParts, err := splitORParts(parts)
- if err != nil {
- return nil, err
- }
- expandedParts, err := expandWildcardVersion(orParts)
- if err != nil {
- return nil, err
- }
- var orFn Range
- for _, p := range expandedParts {
- var andFn Range
- for _, ap := range p {
- opStr, vStr, err := splitComparatorVersion(ap)
- if err != nil {
- return nil, err
- }
- vr, err := buildVersionRange(opStr, vStr)
- if err != nil {
- return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err)
- }
- rf := vr.rangeFunc()
-
- // Set function
- if andFn == nil {
- andFn = rf
- } else { // Combine with existing function
- andFn = andFn.AND(rf)
- }
- }
- if orFn == nil {
- orFn = andFn
- } else {
- orFn = orFn.OR(andFn)
- }
-
- }
- return orFn, nil
-}
-
-// splitORParts splits the already cleaned parts by '||'.
-// Checks for invalid positions of the operator and returns an
-// error if found.
-func splitORParts(parts []string) ([][]string, error) {
- var ORparts [][]string
- last := 0
- for i, p := range parts {
- if p == "||" {
- if i == 0 {
- return nil, fmt.Errorf("First element in range is '||'")
- }
- ORparts = append(ORparts, parts[last:i])
- last = i + 1
- }
- }
- if last == len(parts) {
- return nil, fmt.Errorf("Last element in range is '||'")
- }
- ORparts = append(ORparts, parts[last:])
- return ORparts, nil
-}
-
-// buildVersionRange takes a slice of 2: operator and version
-// and builds a versionRange, otherwise an error.
-func buildVersionRange(opStr, vStr string) (*versionRange, error) {
- c := parseComparator(opStr)
- if c == nil {
- return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, ""))
- }
- v, err := Parse(vStr)
- if err != nil {
- return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err)
- }
-
- return &versionRange{
- v: v,
- c: c,
- }, nil
-
-}
-
-// inArray checks if a byte is contained in an array of bytes
-func inArray(s byte, list []byte) bool {
- for _, el := range list {
- if el == s {
- return true
- }
- }
- return false
-}
-
-// splitAndTrim splits a range string by spaces and cleans whitespaces
-func splitAndTrim(s string) (result []string) {
- last := 0
- var lastChar byte
- excludeFromSplit := []byte{'>', '<', '='}
- for i := 0; i < len(s); i++ {
- if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) {
- if last < i-1 {
- result = append(result, s[last:i])
- }
- last = i + 1
- } else if s[i] != ' ' {
- lastChar = s[i]
- }
- }
- if last < len(s)-1 {
- result = append(result, s[last:])
- }
-
- for i, v := range result {
- result[i] = strings.Replace(v, " ", "", -1)
- }
-
- // parts := strings.Split(s, " ")
- // for _, x := range parts {
- // if s := strings.TrimSpace(x); len(s) != 0 {
- // result = append(result, s)
- // }
- // }
- return
-}
-
-// splitComparatorVersion splits the comparator from the version.
-// Input must be free of leading or trailing spaces.
-func splitComparatorVersion(s string) (string, string, error) {
- i := strings.IndexFunc(s, unicode.IsDigit)
- if i == -1 {
- return "", "", fmt.Errorf("Could not get version from string: %q", s)
- }
- return strings.TrimSpace(s[0:i]), s[i:], nil
-}
-
-// getWildcardType will return the type of wildcard that the
-// passed version contains
-func getWildcardType(vStr string) wildcardType {
- parts := strings.Split(vStr, ".")
- nparts := len(parts)
- wildcard := parts[nparts-1]
-
- possibleWildcardType := wildcardTypefromInt(nparts)
- if wildcard == "x" {
- return possibleWildcardType
- }
-
- return noneWildcard
-}
-
-// createVersionFromWildcard will convert a wildcard version
-// into a regular version, replacing 'x's with '0's, handling
-// special cases like '1.x.x' and '1.x'
-func createVersionFromWildcard(vStr string) string {
- // handle 1.x.x
- vStr2 := strings.Replace(vStr, ".x.x", ".x", 1)
- vStr2 = strings.Replace(vStr2, ".x", ".0", 1)
- parts := strings.Split(vStr2, ".")
-
- // handle 1.x
- if len(parts) == 2 {
- return vStr2 + ".0"
- }
-
- return vStr2
-}
-
-// incrementMajorVersion will increment the major version
-// of the passed version
-func incrementMajorVersion(vStr string) (string, error) {
- parts := strings.Split(vStr, ".")
- i, err := strconv.Atoi(parts[0])
- if err != nil {
- return "", err
- }
- parts[0] = strconv.Itoa(i + 1)
-
- return strings.Join(parts, "."), nil
-}
-
-// incrementMajorVersion will increment the minor version
-// of the passed version
-func incrementMinorVersion(vStr string) (string, error) {
- parts := strings.Split(vStr, ".")
- i, err := strconv.Atoi(parts[1])
- if err != nil {
- return "", err
- }
- parts[1] = strconv.Itoa(i + 1)
-
- return strings.Join(parts, "."), nil
-}
-
-// expandWildcardVersion will expand wildcards inside versions
-// following these rules:
-//
-// * when dealing with patch wildcards:
-// >= 1.2.x will become >= 1.2.0
-// <= 1.2.x will become < 1.3.0
-// > 1.2.x will become >= 1.3.0
-// < 1.2.x will become < 1.2.0
-// != 1.2.x will become < 1.2.0 >= 1.3.0
-//
-// * when dealing with minor wildcards:
-// >= 1.x will become >= 1.0.0
-// <= 1.x will become < 2.0.0
-// > 1.x will become >= 2.0.0
-// < 1.0 will become < 1.0.0
-// != 1.x will become < 1.0.0 >= 2.0.0
-//
-// * when dealing with wildcards without
-// version operator:
-// 1.2.x will become >= 1.2.0 < 1.3.0
-// 1.x will become >= 1.0.0 < 2.0.0
-func expandWildcardVersion(parts [][]string) ([][]string, error) {
- var expandedParts [][]string
- for _, p := range parts {
- var newParts []string
- for _, ap := range p {
- if strings.Index(ap, "x") != -1 {
- opStr, vStr, err := splitComparatorVersion(ap)
- if err != nil {
- return nil, err
- }
-
- versionWildcardType := getWildcardType(vStr)
- flatVersion := createVersionFromWildcard(vStr)
-
- var resultOperator string
- var shouldIncrementVersion bool
- switch opStr {
- case ">":
- resultOperator = ">="
- shouldIncrementVersion = true
- case ">=":
- resultOperator = ">="
- case "<":
- resultOperator = "<"
- case "<=":
- resultOperator = "<"
- shouldIncrementVersion = true
- case "", "=", "==":
- newParts = append(newParts, ">="+flatVersion)
- resultOperator = "<"
- shouldIncrementVersion = true
- case "!=", "!":
- newParts = append(newParts, "<"+flatVersion)
- resultOperator = ">="
- shouldIncrementVersion = true
- }
-
- var resultVersion string
- if shouldIncrementVersion {
- switch versionWildcardType {
- case patchWildcard:
- resultVersion, _ = incrementMinorVersion(flatVersion)
- case minorWildcard:
- resultVersion, _ = incrementMajorVersion(flatVersion)
- }
- } else {
- resultVersion = flatVersion
- }
-
- ap = resultOperator + resultVersion
- }
- newParts = append(newParts, ap)
- }
- expandedParts = append(expandedParts, newParts)
- }
-
- return expandedParts, nil
-}
-
-func parseComparator(s string) comparator {
- switch s {
- case "==":
- fallthrough
- case "":
- fallthrough
- case "=":
- return compEQ
- case ">":
- return compGT
- case ">=":
- return compGE
- case "<":
- return compLT
- case "<=":
- return compLE
- case "!":
- fallthrough
- case "!=":
- return compNE
- }
-
- return nil
-}
-
-// MustParseRange is like ParseRange but panics if the range cannot be parsed.
-func MustParseRange(s string) Range {
- r, err := ParseRange(s)
- if err != nil {
- panic(`semver: ParseRange(` + s + `): ` + err.Error())
- }
- return r
-}
diff --git a/vendor/github.com/blang/semver/LICENSE b/vendor/github.com/blang/semver/v4/LICENSE
similarity index 100%
rename from vendor/github.com/blang/semver/LICENSE
rename to vendor/github.com/blang/semver/v4/LICENSE
diff --git a/vendor/github.com/blang/semver/json.go b/vendor/github.com/blang/semver/v4/json.go
similarity index 100%
rename from vendor/github.com/blang/semver/json.go
rename to vendor/github.com/blang/semver/v4/json.go
diff --git a/vendor/github.com/blang/semver/v4/range.go b/vendor/github.com/blang/semver/v4/range.go
new file mode 100644
index 00000000..95f7139b
--- /dev/null
+++ b/vendor/github.com/blang/semver/v4/range.go
@@ -0,0 +1,416 @@
+package semver
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+type wildcardType int
+
+const (
+ noneWildcard wildcardType = iota
+ majorWildcard wildcardType = 1
+ minorWildcard wildcardType = 2
+ patchWildcard wildcardType = 3
+)
+
+func wildcardTypefromInt(i int) wildcardType {
+ switch i {
+ case 1:
+ return majorWildcard
+ case 2:
+ return minorWildcard
+ case 3:
+ return patchWildcard
+ default:
+ return noneWildcard
+ }
+}
+
+type comparator func(Version, Version) bool
+
+var (
+ compEQ comparator = func(v1 Version, v2 Version) bool {
+ return v1.Compare(v2) == 0
+ }
+ compNE = func(v1 Version, v2 Version) bool {
+ return v1.Compare(v2) != 0
+ }
+ compGT = func(v1 Version, v2 Version) bool {
+ return v1.Compare(v2) == 1
+ }
+ compGE = func(v1 Version, v2 Version) bool {
+ return v1.Compare(v2) >= 0
+ }
+ compLT = func(v1 Version, v2 Version) bool {
+ return v1.Compare(v2) == -1
+ }
+ compLE = func(v1 Version, v2 Version) bool {
+ return v1.Compare(v2) <= 0
+ }
+)
+
+type versionRange struct {
+ v Version
+ c comparator
+}
+
+// rangeFunc creates a Range from the given versionRange.
+func (vr *versionRange) rangeFunc() Range {
+ return Range(func(v Version) bool {
+ return vr.c(v, vr.v)
+ })
+}
+
+// Range represents a range of versions.
+// A Range can be used to check if a Version satisfies it:
+//
+// range, err := semver.ParseRange(">1.0.0 <2.0.0")
+// range(semver.MustParse("1.1.1")) // returns true
+type Range func(Version) bool
+
+// OR combines the existing Range with another Range using logical OR.
+func (rf Range) OR(f Range) Range {
+ return Range(func(v Version) bool {
+ return rf(v) || f(v)
+ })
+}
+
+// AND combines the existing Range with another Range using logical AND.
+func (rf Range) AND(f Range) Range {
+ return Range(func(v Version) bool {
+ return rf(v) && f(v)
+ })
+}
+
+// ParseRange parses a range and returns a Range.
+// If the range could not be parsed an error is returned.
+//
+// Valid ranges are:
+// - "<1.0.0"
+// - "<=1.0.0"
+// - ">1.0.0"
+// - ">=1.0.0"
+// - "1.0.0", "=1.0.0", "==1.0.0"
+// - "!1.0.0", "!=1.0.0"
+//
+// A Range can consist of multiple ranges separated by space:
+// Ranges can be linked by logical AND:
+// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0"
+// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2
+//
+// Ranges can also be linked by logical OR:
+// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x"
+//
+// AND has a higher precedence than OR. It's not possible to use brackets.
+//
+// Ranges can be combined by both AND and OR
+//
+// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
+func ParseRange(s string) (Range, error) {
+ parts := splitAndTrim(s)
+ orParts, err := splitORParts(parts)
+ if err != nil {
+ return nil, err
+ }
+ expandedParts, err := expandWildcardVersion(orParts)
+ if err != nil {
+ return nil, err
+ }
+ var orFn Range
+ for _, p := range expandedParts {
+ var andFn Range
+ for _, ap := range p {
+ opStr, vStr, err := splitComparatorVersion(ap)
+ if err != nil {
+ return nil, err
+ }
+ vr, err := buildVersionRange(opStr, vStr)
+ if err != nil {
+ return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err)
+ }
+ rf := vr.rangeFunc()
+
+ // Set function
+ if andFn == nil {
+ andFn = rf
+ } else { // Combine with existing function
+ andFn = andFn.AND(rf)
+ }
+ }
+ if orFn == nil {
+ orFn = andFn
+ } else {
+ orFn = orFn.OR(andFn)
+ }
+
+ }
+ return orFn, nil
+}
+
+// splitORParts splits the already cleaned parts by '||'.
+// Checks for invalid positions of the operator and returns an
+// error if found.
+func splitORParts(parts []string) ([][]string, error) {
+ var ORparts [][]string
+ last := 0
+ for i, p := range parts {
+ if p == "||" {
+ if i == 0 {
+ return nil, fmt.Errorf("First element in range is '||'")
+ }
+ ORparts = append(ORparts, parts[last:i])
+ last = i + 1
+ }
+ }
+ if last == len(parts) {
+ return nil, fmt.Errorf("Last element in range is '||'")
+ }
+ ORparts = append(ORparts, parts[last:])
+ return ORparts, nil
+}
+
+// buildVersionRange takes a slice of 2: operator and version
+// and builds a versionRange, otherwise an error.
+func buildVersionRange(opStr, vStr string) (*versionRange, error) {
+ c := parseComparator(opStr)
+ if c == nil {
+ return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, ""))
+ }
+ v, err := Parse(vStr)
+ if err != nil {
+ return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err)
+ }
+
+ return &versionRange{
+ v: v,
+ c: c,
+ }, nil
+
+}
+
+// inArray checks if a byte is contained in an array of bytes
+func inArray(s byte, list []byte) bool {
+ for _, el := range list {
+ if el == s {
+ return true
+ }
+ }
+ return false
+}
+
+// splitAndTrim splits a range string by spaces and cleans whitespaces
+func splitAndTrim(s string) (result []string) {
+ last := 0
+ var lastChar byte
+ excludeFromSplit := []byte{'>', '<', '='}
+ for i := 0; i < len(s); i++ {
+ if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) {
+ if last < i-1 {
+ result = append(result, s[last:i])
+ }
+ last = i + 1
+ } else if s[i] != ' ' {
+ lastChar = s[i]
+ }
+ }
+ if last < len(s)-1 {
+ result = append(result, s[last:])
+ }
+
+ for i, v := range result {
+ result[i] = strings.Replace(v, " ", "", -1)
+ }
+
+ // parts := strings.Split(s, " ")
+ // for _, x := range parts {
+ // if s := strings.TrimSpace(x); len(s) != 0 {
+ // result = append(result, s)
+ // }
+ // }
+ return
+}
+
+// splitComparatorVersion splits the comparator from the version.
+// Input must be free of leading or trailing spaces.
+func splitComparatorVersion(s string) (string, string, error) {
+ i := strings.IndexFunc(s, unicode.IsDigit)
+ if i == -1 {
+ return "", "", fmt.Errorf("Could not get version from string: %q", s)
+ }
+ return strings.TrimSpace(s[0:i]), s[i:], nil
+}
+
+// getWildcardType will return the type of wildcard that the
+// passed version contains
+func getWildcardType(vStr string) wildcardType {
+ parts := strings.Split(vStr, ".")
+ nparts := len(parts)
+ wildcard := parts[nparts-1]
+
+ possibleWildcardType := wildcardTypefromInt(nparts)
+ if wildcard == "x" {
+ return possibleWildcardType
+ }
+
+ return noneWildcard
+}
+
+// createVersionFromWildcard will convert a wildcard version
+// into a regular version, replacing 'x's with '0's, handling
+// special cases like '1.x.x' and '1.x'
+func createVersionFromWildcard(vStr string) string {
+ // handle 1.x.x
+ vStr2 := strings.Replace(vStr, ".x.x", ".x", 1)
+ vStr2 = strings.Replace(vStr2, ".x", ".0", 1)
+ parts := strings.Split(vStr2, ".")
+
+ // handle 1.x
+ if len(parts) == 2 {
+ return vStr2 + ".0"
+ }
+
+ return vStr2
+}
+
+// incrementMajorVersion will increment the major version
+// of the passed version
+func incrementMajorVersion(vStr string) (string, error) {
+ parts := strings.Split(vStr, ".")
+ i, err := strconv.Atoi(parts[0])
+ if err != nil {
+ return "", err
+ }
+ parts[0] = strconv.Itoa(i + 1)
+
+ return strings.Join(parts, "."), nil
+}
+
+// incrementMinorVersion will increment the minor version
+// of the passed version
+func incrementMinorVersion(vStr string) (string, error) {
+ parts := strings.Split(vStr, ".")
+ i, err := strconv.Atoi(parts[1])
+ if err != nil {
+ return "", err
+ }
+ parts[1] = strconv.Itoa(i + 1)
+
+ return strings.Join(parts, "."), nil
+}
+
+// expandWildcardVersion will expand wildcards inside versions
+// following these rules:
+//
+// * when dealing with patch wildcards:
+// >= 1.2.x will become >= 1.2.0
+// <= 1.2.x will become < 1.3.0
+// > 1.2.x will become >= 1.3.0
+// < 1.2.x will become < 1.2.0
+// != 1.2.x will become < 1.2.0 >= 1.3.0
+//
+// * when dealing with minor wildcards:
+// >= 1.x will become >= 1.0.0
+// <= 1.x will become < 2.0.0
+// > 1.x will become >= 2.0.0
+// < 1.0 will become < 1.0.0
+// != 1.x will become < 1.0.0 >= 2.0.0
+//
+// * when dealing with wildcards without
+// version operator:
+// 1.2.x will become >= 1.2.0 < 1.3.0
+// 1.x will become >= 1.0.0 < 2.0.0
+func expandWildcardVersion(parts [][]string) ([][]string, error) {
+ var expandedParts [][]string
+ for _, p := range parts {
+ var newParts []string
+ for _, ap := range p {
+ if strings.Contains(ap, "x") {
+ opStr, vStr, err := splitComparatorVersion(ap)
+ if err != nil {
+ return nil, err
+ }
+
+ versionWildcardType := getWildcardType(vStr)
+ flatVersion := createVersionFromWildcard(vStr)
+
+ var resultOperator string
+ var shouldIncrementVersion bool
+ switch opStr {
+ case ">":
+ resultOperator = ">="
+ shouldIncrementVersion = true
+ case ">=":
+ resultOperator = ">="
+ case "<":
+ resultOperator = "<"
+ case "<=":
+ resultOperator = "<"
+ shouldIncrementVersion = true
+ case "", "=", "==":
+ newParts = append(newParts, ">="+flatVersion)
+ resultOperator = "<"
+ shouldIncrementVersion = true
+ case "!=", "!":
+ newParts = append(newParts, "<"+flatVersion)
+ resultOperator = ">="
+ shouldIncrementVersion = true
+ }
+
+ var resultVersion string
+ if shouldIncrementVersion {
+ switch versionWildcardType {
+ case patchWildcard:
+ resultVersion, _ = incrementMinorVersion(flatVersion)
+ case minorWildcard:
+ resultVersion, _ = incrementMajorVersion(flatVersion)
+ }
+ } else {
+ resultVersion = flatVersion
+ }
+
+ ap = resultOperator + resultVersion
+ }
+ newParts = append(newParts, ap)
+ }
+ expandedParts = append(expandedParts, newParts)
+ }
+
+ return expandedParts, nil
+}
+
+func parseComparator(s string) comparator {
+ switch s {
+ case "==":
+ fallthrough
+ case "":
+ fallthrough
+ case "=":
+ return compEQ
+ case ">":
+ return compGT
+ case ">=":
+ return compGE
+ case "<":
+ return compLT
+ case "<=":
+ return compLE
+ case "!":
+ fallthrough
+ case "!=":
+ return compNE
+ }
+
+ return nil
+}
+
+// MustParseRange is like ParseRange but panics if the range cannot be parsed.
+func MustParseRange(s string) Range {
+ r, err := ParseRange(s)
+ if err != nil {
+ panic(`semver: ParseRange(` + s + `): ` + err.Error())
+ }
+ return r
+}
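Usage sketch (illustrative, not part of the patch) for the v4 range.go added above, combining AND, OR, and a wildcard, all of which the code supports:

```go
package main

import (
	"fmt"

	"github.com/blang/semver/v4"
)

func main() {
	// ">=1.2.x" is expanded by expandWildcardVersion to ">=1.2.0".
	r := semver.MustParseRange(">=1.2.x <2.0.0 || >=3.0.0")

	fmt.Println(r(semver.MustParse("1.4.7"))) // true  (matches the first AND group)
	fmt.Println(r(semver.MustParse("2.1.0"))) // false (fails <2.0.0, below 3.0.0)
	fmt.Println(r(semver.MustParse("3.0.1"))) // true  (matches >=3.0.0)
}
```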
diff --git a/vendor/github.com/blang/semver/semver.go b/vendor/github.com/blang/semver/v4/semver.go
similarity index 85%
rename from vendor/github.com/blang/semver/semver.go
rename to vendor/github.com/blang/semver/v4/semver.go
index 8ee0842e..307de610 100644
--- a/vendor/github.com/blang/semver/semver.go
+++ b/vendor/github.com/blang/semver/v4/semver.go
@@ -26,7 +26,7 @@ type Version struct {
Minor uint64
Patch uint64
Pre []PRVersion
- Build []string //No Precendence
+ Build []string //No Precedence
}
// Version to string
@@ -61,6 +61,18 @@ func (v Version) String() string {
return string(b)
}
+// FinalizeVersion discards prerelease and build number and only returns
+// major, minor and patch number.
+func (v Version) FinalizeVersion() string {
+ b := make([]byte, 0, 5)
+ b = strconv.AppendUint(b, v.Major, 10)
+ b = append(b, '.')
+ b = strconv.AppendUint(b, v.Minor, 10)
+ b = append(b, '.')
+ b = strconv.AppendUint(b, v.Patch, 10)
+ return string(b)
+}
+
// Equals checks if v is equal to o.
func (v Version) Equals(o Version) bool {
return (v.Compare(o) == 0)
@@ -161,6 +173,27 @@ func (v Version) Compare(o Version) int {
}
+// IncrementPatch increments the patch version
+func (v *Version) IncrementPatch() error {
+ v.Patch++
+ return nil
+}
+
+// IncrementMinor increments the minor version
+func (v *Version) IncrementMinor() error {
+ v.Minor++
+ v.Patch = 0
+ return nil
+}
+
+// IncrementMajor increments the major version
+func (v *Version) IncrementMajor() error {
+ v.Major++
+ v.Minor = 0
+ v.Patch = 0
+ return nil
+}
+
// Validate validates v and returns error in case
func (v Version) Validate() error {
// Major, Minor, Patch already validated using uint64
@@ -189,10 +222,10 @@ func (v Version) Validate() error {
}
// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error
-func New(s string) (vp *Version, err error) {
+func New(s string) (*Version, error) {
v, err := Parse(s)
- vp = &v
- return
+ vp := &v
+ return vp, err
}
// Make is an alias for Parse, parses version string and returns a validated Version or error
@@ -202,14 +235,25 @@ func Make(s string) (Version, error) {
// ParseTolerant allows for certain version specifications that do not strictly adhere to semver
// specs to be parsed by this library. It does so by normalizing versions before passing them to
-// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions
-// with only major and minor components specified
+// Parse(). It currently trims spaces, removes a "v" prefix, adds a 0 patch number to versions
+// with only major and minor components specified, and removes leading 0s.
func ParseTolerant(s string) (Version, error) {
s = strings.TrimSpace(s)
s = strings.TrimPrefix(s, "v")
// Split into major.minor.(patch+pr+meta)
parts := strings.SplitN(s, ".", 3)
+ // Remove leading zeros.
+ for i, p := range parts {
+ if len(p) > 1 {
+ p = strings.TrimLeft(p, "0")
+ if len(p) == 0 || !strings.ContainsAny(p[0:1], "0123456789") {
+ p = "0" + p
+ }
+ parts[i] = p
+ }
+ }
+ // Fill up shortened versions.
if len(parts) < 3 {
if strings.ContainsAny(parts[len(parts)-1], "+-") {
return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data")
@@ -217,8 +261,8 @@ func ParseTolerant(s string) (Version, error) {
for len(parts) < 3 {
parts = append(parts, "0")
}
- s = strings.Join(parts, ".")
}
+ s = strings.Join(parts, ".")
return Parse(s)
}
@@ -416,3 +460,17 @@ func NewBuildVersion(s string) (string, error) {
}
return s, nil
}
+
+// FinalizeVersion returns the major, minor and patch number only and discards
+// prerelease and build number.
+func FinalizeVersion(s string) (string, error) {
+ v, err := Parse(s)
+ if err != nil {
+ return "", err
+ }
+ v.Pre = nil
+ v.Build = nil
+
+ finalVer := v.String()
+ return finalVer, nil
+}
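Usage sketch (illustrative, not part of the patch) exercising the semver.go additions above: leading-zero stripping in ParseTolerant, the Increment* helpers, and FinalizeVersion:

```go
package main

import (
	"fmt"

	"github.com/blang/semver/v4"
)

func main() {
	// Leading zeros and a missing patch component are now normalised.
	v, err := semver.ParseTolerant("v01.2")
	if err != nil {
		panic(err)
	}
	fmt.Println(v.String()) // 1.2.0

	_ = v.IncrementMinor()  // minor +1, patch reset to 0
	fmt.Println(v.String()) // 1.3.0

	// FinalizeVersion drops prerelease and build metadata.
	final, _ := semver.FinalizeVersion("2.1.0-beta.1+build.5")
	fmt.Println(final) // 2.1.0
}
```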
diff --git a/vendor/github.com/blang/semver/sort.go b/vendor/github.com/blang/semver/v4/sort.go
similarity index 100%
rename from vendor/github.com/blang/semver/sort.go
rename to vendor/github.com/blang/semver/v4/sort.go
diff --git a/vendor/github.com/blang/semver/sql.go b/vendor/github.com/blang/semver/v4/sql.go
similarity index 87%
rename from vendor/github.com/blang/semver/sql.go
rename to vendor/github.com/blang/semver/v4/sql.go
index eb4d8026..db958134 100644
--- a/vendor/github.com/blang/semver/sql.go
+++ b/vendor/github.com/blang/semver/v4/sql.go
@@ -14,7 +14,7 @@ func (v *Version) Scan(src interface{}) (err error) {
case []byte:
str = string(src)
default:
- return fmt.Errorf("Version.Scan: cannot convert %T to string.", src)
+ return fmt.Errorf("version.Scan: cannot convert %T to string", src)
}
if t, err := Parse(str); err == nil {
diff --git a/vendor/github.com/dlclark/regexp2/regexp.go b/vendor/github.com/dlclark/regexp2/regexp.go
index c8f9e6b0..a7ddbaf3 100644
--- a/vendor/github.com/dlclark/regexp2/regexp.go
+++ b/vendor/github.com/dlclark/regexp2/regexp.go
@@ -18,8 +18,12 @@ import (
"github.com/dlclark/regexp2/syntax"
)
-// Default timeout used when running regexp matches -- "forever"
-var DefaultMatchTimeout = time.Duration(math.MaxInt64)
+var (
+ // DefaultMatchTimeout used when running regexp matches -- "forever"
+ DefaultMatchTimeout = time.Duration(math.MaxInt64)
+ // DefaultUnmarshalOptions used when unmarshaling a regex from text
+ DefaultUnmarshalOptions = None
+)
// Regexp is the representation of a compiled regular expression.
// A Regexp is safe for concurrent use by multiple goroutines.
@@ -43,7 +47,7 @@ type Regexp struct {
code *syntax.Code // compiled program
// cache of machines for running regexp
- muRun sync.Mutex
+ muRun *sync.Mutex
runner []*runner
}
@@ -72,6 +76,7 @@ func Compile(expr string, opt RegexOptions) (*Regexp, error) {
capsize: code.Capsize,
code: code,
MatchTimeout: DefaultMatchTimeout,
+ muRun: &sync.Mutex{},
}, nil
}
@@ -371,3 +376,20 @@ func (re *Regexp) GroupNumberFromName(name string) int {
return -1
}
+
+// MarshalText implements [encoding.TextMarshaler]. The output
+// matches that of calling the [Regexp.String] method.
+func (re *Regexp) MarshalText() ([]byte, error) {
+ return []byte(re.String()), nil
+}
+
+// UnmarshalText implements [encoding.TextUnmarshaler] by calling
+// [Compile] on the encoded value.
+func (re *Regexp) UnmarshalText(text []byte) error {
+ newRE, err := Compile(string(text), DefaultUnmarshalOptions)
+ if err != nil {
+ return err
+ }
+ *re = *newRE
+ return nil
+}
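Usage sketch (illustrative, not part of the patch) for the regexp2 MarshalText/UnmarshalText methods added above, which let compiled patterns round-trip through encoding/json; note that UnmarshalText recompiles with DefaultUnmarshalOptions, so option flags such as IgnoreCase are not preserved:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/dlclark/regexp2"
)

type rule struct {
	Pattern *regexp2.Regexp `json:"pattern"`
}

func main() {
	r := rule{Pattern: regexp2.MustCompile(`^a+b$`, regexp2.None)}

	data, err := json.Marshal(r) // MarshalText emits the pattern source
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"pattern":"^a+b$"}

	var decoded rule
	if err := json.Unmarshal(data, &decoded); err != nil { // UnmarshalText recompiles it
		panic(err)
	}
	ok, _ := decoded.Pattern.MatchString("aaab")
	fmt.Println(ok) // true
}
```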
diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml
deleted file mode 100644
index ba95cdd1..00000000
--- a/vendor/github.com/dustin/go-humanize/.travis.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-sudo: false
-language: go
-go:
- - 1.3.x
- - 1.5.x
- - 1.6.x
- - 1.7.x
- - 1.8.x
- - 1.9.x
- - master
-matrix:
- allow_failures:
- - go: master
- fast_finish: true
-install:
- - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
-script:
- - go get -t -v ./...
- - diff -u <(echo -n) <(gofmt -d -s .)
- - go tool vet .
- - go test -v -race ./...
diff --git a/vendor/github.com/dustin/go-humanize/LICENSE b/vendor/github.com/dustin/go-humanize/LICENSE
deleted file mode 100644
index 8d9a94a9..00000000
--- a/vendor/github.com/dustin/go-humanize/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-Copyright (c) 2005-2008 Dustin Sallings
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown
deleted file mode 100644
index 91b4ae56..00000000
--- a/vendor/github.com/dustin/go-humanize/README.markdown
+++ /dev/null
@@ -1,124 +0,0 @@
-# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize)
-
-Just a few functions for helping humanize times and sizes.
-
-`go get` it as `github.com/dustin/go-humanize`, import it as
-`"github.com/dustin/go-humanize"`, use it as `humanize`.
-
-See [godoc](https://godoc.org/github.com/dustin/go-humanize) for
-complete documentation.
-
-## Sizes
-
-This lets you take numbers like `82854982` and convert them to useful
-strings like, `83 MB` or `79 MiB` (whichever you prefer).
-
-Example:
-
-```go
-fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB.
-```
-
-## Times
-
-This lets you take a `time.Time` and spit it out in relative terms.
-For example, `12 seconds ago` or `3 days from now`.
-
-Example:
-
-```go
-fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago.
-```
-
-Thanks to Kyle Lemons for the time implementation from an IRC
-conversation one day. It's pretty neat.
-
-## Ordinals
-
-From a [mailing list discussion][odisc] where a user wanted to be able
-to label ordinals.
-
- 0 -> 0th
- 1 -> 1st
- 2 -> 2nd
- 3 -> 3rd
- 4 -> 4th
- [...]
-
-Example:
-
-```go
-fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend.
-```
-
-## Commas
-
-Want to shove commas into numbers? Be my guest.
-
- 0 -> 0
- 100 -> 100
- 1000 -> 1,000
- 1000000000 -> 1,000,000,000
- -100000 -> -100,000
-
-Example:
-
-```go
-fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491.
-```
-
-## Ftoa
-
-Nicer float64 formatter that removes trailing zeros.
-
-```go
-fmt.Printf("%f", 2.24) // 2.240000
-fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24
-fmt.Printf("%f", 2.0) // 2.000000
-fmt.Printf("%s", humanize.Ftoa(2.0)) // 2
-```
-
-## SI notation
-
-Format numbers with [SI notation][sinotation].
-
-Example:
-
-```go
-humanize.SI(0.00000000223, "M") // 2.23 nM
-```
-
-## English-specific functions
-
-The following functions are in the `humanize/english` subpackage.
-
-### Plurals
-
-Simple English pluralization
-
-```go
-english.PluralWord(1, "object", "") // object
-english.PluralWord(42, "object", "") // objects
-english.PluralWord(2, "bus", "") // buses
-english.PluralWord(99, "locus", "loci") // loci
-
-english.Plural(1, "object", "") // 1 object
-english.Plural(42, "object", "") // 42 objects
-english.Plural(2, "bus", "") // 2 buses
-english.Plural(99, "locus", "loci") // 99 loci
-```
-
-### Word series
-
-Format comma-separated words lists with conjuctions:
-
-```go
-english.WordSeries([]string{"foo"}, "and") // foo
-english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar
-english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz
-
-english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz
-```
-
-[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion
-[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix
diff --git a/vendor/github.com/dustin/go-humanize/big.go b/vendor/github.com/dustin/go-humanize/big.go
deleted file mode 100644
index f49dc337..00000000
--- a/vendor/github.com/dustin/go-humanize/big.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package humanize
-
-import (
- "math/big"
-)
-
-// order of magnitude (to a max order)
-func oomm(n, b *big.Int, maxmag int) (float64, int) {
- mag := 0
- m := &big.Int{}
- for n.Cmp(b) >= 0 {
- n.DivMod(n, b, m)
- mag++
- if mag == maxmag && maxmag >= 0 {
- break
- }
- }
- return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
-}
-
-// total order of magnitude
-// (same as above, but with no upper limit)
-func oom(n, b *big.Int) (float64, int) {
- mag := 0
- m := &big.Int{}
- for n.Cmp(b) >= 0 {
- n.DivMod(n, b, m)
- mag++
- }
- return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
-}
diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go
deleted file mode 100644
index 1a2bf617..00000000
--- a/vendor/github.com/dustin/go-humanize/bigbytes.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package humanize
-
-import (
- "fmt"
- "math/big"
- "strings"
- "unicode"
-)
-
-var (
- bigIECExp = big.NewInt(1024)
-
- // BigByte is one byte in bit.Ints
- BigByte = big.NewInt(1)
- // BigKiByte is 1,024 bytes in bit.Ints
- BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
- // BigMiByte is 1,024 k bytes in bit.Ints
- BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
- // BigGiByte is 1,024 m bytes in bit.Ints
- BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp)
- // BigTiByte is 1,024 g bytes in bit.Ints
- BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp)
- // BigPiByte is 1,024 t bytes in bit.Ints
- BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp)
- // BigEiByte is 1,024 p bytes in bit.Ints
- BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp)
- // BigZiByte is 1,024 e bytes in bit.Ints
- BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
- // BigYiByte is 1,024 z bytes in bit.Ints
- BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
-)
-
-var (
- bigSIExp = big.NewInt(1000)
-
- // BigSIByte is one SI byte in big.Ints
- BigSIByte = big.NewInt(1)
- // BigKByte is 1,000 SI bytes in big.Ints
- BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp)
- // BigMByte is 1,000 SI k bytes in big.Ints
- BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp)
- // BigGByte is 1,000 SI m bytes in big.Ints
- BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp)
- // BigTByte is 1,000 SI g bytes in big.Ints
- BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp)
- // BigPByte is 1,000 SI t bytes in big.Ints
- BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp)
- // BigEByte is 1,000 SI p bytes in big.Ints
- BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp)
- // BigZByte is 1,000 SI e bytes in big.Ints
- BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
- // BigYByte is 1,000 SI z bytes in big.Ints
- BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
-)
-
-var bigBytesSizeTable = map[string]*big.Int{
- "b": BigByte,
- "kib": BigKiByte,
- "kb": BigKByte,
- "mib": BigMiByte,
- "mb": BigMByte,
- "gib": BigGiByte,
- "gb": BigGByte,
- "tib": BigTiByte,
- "tb": BigTByte,
- "pib": BigPiByte,
- "pb": BigPByte,
- "eib": BigEiByte,
- "eb": BigEByte,
- "zib": BigZiByte,
- "zb": BigZByte,
- "yib": BigYiByte,
- "yb": BigYByte,
- // Without suffix
- "": BigByte,
- "ki": BigKiByte,
- "k": BigKByte,
- "mi": BigMiByte,
- "m": BigMByte,
- "gi": BigGiByte,
- "g": BigGByte,
- "ti": BigTiByte,
- "t": BigTByte,
- "pi": BigPiByte,
- "p": BigPByte,
- "ei": BigEiByte,
- "e": BigEByte,
- "z": BigZByte,
- "zi": BigZiByte,
- "y": BigYByte,
- "yi": BigYiByte,
-}
-
-var ten = big.NewInt(10)
-
-func humanateBigBytes(s, base *big.Int, sizes []string) string {
- if s.Cmp(ten) < 0 {
- return fmt.Sprintf("%d B", s)
- }
- c := (&big.Int{}).Set(s)
- val, mag := oomm(c, base, len(sizes)-1)
- suffix := sizes[mag]
- f := "%.0f %s"
- if val < 10 {
- f = "%.1f %s"
- }
-
- return fmt.Sprintf(f, val, suffix)
-
-}
-
-// BigBytes produces a human readable representation of an SI size.
-//
-// See also: ParseBigBytes.
-//
-// BigBytes(82854982) -> 83 MB
-func BigBytes(s *big.Int) string {
- sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
- return humanateBigBytes(s, bigSIExp, sizes)
-}
-
-// BigIBytes produces a human readable representation of an IEC size.
-//
-// See also: ParseBigBytes.
-//
-// BigIBytes(82854982) -> 79 MiB
-func BigIBytes(s *big.Int) string {
- sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
- return humanateBigBytes(s, bigIECExp, sizes)
-}
-
-// ParseBigBytes parses a string representation of bytes into the number
-// of bytes it represents.
-//
-// See also: BigBytes, BigIBytes.
-//
-// ParseBigBytes("42 MB") -> 42000000, nil
-// ParseBigBytes("42 mib") -> 44040192, nil
-func ParseBigBytes(s string) (*big.Int, error) {
- lastDigit := 0
- hasComma := false
- for _, r := range s {
- if !(unicode.IsDigit(r) || r == '.' || r == ',') {
- break
- }
- if r == ',' {
- hasComma = true
- }
- lastDigit++
- }
-
- num := s[:lastDigit]
- if hasComma {
- num = strings.Replace(num, ",", "", -1)
- }
-
- val := &big.Rat{}
- _, err := fmt.Sscanf(num, "%f", val)
- if err != nil {
- return nil, err
- }
-
- extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
- if m, ok := bigBytesSizeTable[extra]; ok {
- mv := (&big.Rat{}).SetInt(m)
- val.Mul(val, mv)
- rv := &big.Int{}
- rv.Div(val.Num(), val.Denom())
- return rv, nil
- }
-
- return nil, fmt.Errorf("unhandled size name: %v", extra)
-}
diff --git a/vendor/github.com/dustin/go-humanize/bytes.go b/vendor/github.com/dustin/go-humanize/bytes.go
deleted file mode 100644
index 0b498f48..00000000
--- a/vendor/github.com/dustin/go-humanize/bytes.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package humanize
-
-import (
- "fmt"
- "math"
- "strconv"
- "strings"
- "unicode"
-)
-
-// IEC Sizes.
-// kibis of bits
-const (
- Byte = 1 << (iota * 10)
- KiByte
- MiByte
- GiByte
- TiByte
- PiByte
- EiByte
-)
-
-// SI Sizes.
-const (
- IByte = 1
- KByte = IByte * 1000
- MByte = KByte * 1000
- GByte = MByte * 1000
- TByte = GByte * 1000
- PByte = TByte * 1000
- EByte = PByte * 1000
-)
-
-var bytesSizeTable = map[string]uint64{
- "b": Byte,
- "kib": KiByte,
- "kb": KByte,
- "mib": MiByte,
- "mb": MByte,
- "gib": GiByte,
- "gb": GByte,
- "tib": TiByte,
- "tb": TByte,
- "pib": PiByte,
- "pb": PByte,
- "eib": EiByte,
- "eb": EByte,
- // Without suffix
- "": Byte,
- "ki": KiByte,
- "k": KByte,
- "mi": MiByte,
- "m": MByte,
- "gi": GiByte,
- "g": GByte,
- "ti": TiByte,
- "t": TByte,
- "pi": PiByte,
- "p": PByte,
- "ei": EiByte,
- "e": EByte,
-}
-
-func logn(n, b float64) float64 {
- return math.Log(n) / math.Log(b)
-}
-
-func humanateBytes(s uint64, base float64, sizes []string) string {
- if s < 10 {
- return fmt.Sprintf("%d B", s)
- }
- e := math.Floor(logn(float64(s), base))
- suffix := sizes[int(e)]
- val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10
- f := "%.0f %s"
- if val < 10 {
- f = "%.1f %s"
- }
-
- return fmt.Sprintf(f, val, suffix)
-}
-
-// Bytes produces a human readable representation of an SI size.
-//
-// See also: ParseBytes.
-//
-// Bytes(82854982) -> 83 MB
-func Bytes(s uint64) string {
- sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"}
- return humanateBytes(s, 1000, sizes)
-}
-
-// IBytes produces a human readable representation of an IEC size.
-//
-// See also: ParseBytes.
-//
-// IBytes(82854982) -> 79 MiB
-func IBytes(s uint64) string {
- sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
- return humanateBytes(s, 1024, sizes)
-}
-
-// ParseBytes parses a string representation of bytes into the number
-// of bytes it represents.
-//
-// See Also: Bytes, IBytes.
-//
-// ParseBytes("42 MB") -> 42000000, nil
-// ParseBytes("42 mib") -> 44040192, nil
-func ParseBytes(s string) (uint64, error) {
- lastDigit := 0
- hasComma := false
- for _, r := range s {
- if !(unicode.IsDigit(r) || r == '.' || r == ',') {
- break
- }
- if r == ',' {
- hasComma = true
- }
- lastDigit++
- }
-
- num := s[:lastDigit]
- if hasComma {
- num = strings.Replace(num, ",", "", -1)
- }
-
- f, err := strconv.ParseFloat(num, 64)
- if err != nil {
- return 0, err
- }
-
- extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
- if m, ok := bytesSizeTable[extra]; ok {
- f *= float64(m)
- if f >= math.MaxUint64 {
- return 0, fmt.Errorf("too large: %v", s)
- }
- return uint64(f), nil
- }
-
- return 0, fmt.Errorf("unhandled size name: %v", extra)
-}
diff --git a/vendor/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go
deleted file mode 100644
index 520ae3e5..00000000
--- a/vendor/github.com/dustin/go-humanize/comma.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package humanize
-
-import (
- "bytes"
- "math"
- "math/big"
- "strconv"
- "strings"
-)
-
-// Comma produces a string form of the given number in base 10 with
-// commas after every three orders of magnitude.
-//
-// e.g. Comma(834142) -> 834,142
-func Comma(v int64) string {
- sign := ""
-
- // Min int64 can't be negated to a usable value, so it has to be special cased.
- if v == math.MinInt64 {
- return "-9,223,372,036,854,775,808"
- }
-
- if v < 0 {
- sign = "-"
- v = 0 - v
- }
-
- parts := []string{"", "", "", "", "", "", ""}
- j := len(parts) - 1
-
- for v > 999 {
- parts[j] = strconv.FormatInt(v%1000, 10)
- switch len(parts[j]) {
- case 2:
- parts[j] = "0" + parts[j]
- case 1:
- parts[j] = "00" + parts[j]
- }
- v = v / 1000
- j--
- }
- parts[j] = strconv.Itoa(int(v))
- return sign + strings.Join(parts[j:], ",")
-}
-
-// Commaf produces a string form of the given number in base 10 with
-// commas after every three orders of magnitude.
-//
-// e.g. Commaf(834142.32) -> 834,142.32
-func Commaf(v float64) string {
- buf := &bytes.Buffer{}
- if v < 0 {
- buf.Write([]byte{'-'})
- v = 0 - v
- }
-
- comma := []byte{','}
-
- parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".")
- pos := 0
- if len(parts[0])%3 != 0 {
- pos += len(parts[0]) % 3
- buf.WriteString(parts[0][:pos])
- buf.Write(comma)
- }
- for ; pos < len(parts[0]); pos += 3 {
- buf.WriteString(parts[0][pos : pos+3])
- buf.Write(comma)
- }
- buf.Truncate(buf.Len() - 1)
-
- if len(parts) > 1 {
- buf.Write([]byte{'.'})
- buf.WriteString(parts[1])
- }
- return buf.String()
-}
-
-// CommafWithDigits works like the Commaf but limits the resulting
-// string to the given number of decimal places.
-//
-// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3
-func CommafWithDigits(f float64, decimals int) string {
- return stripTrailingDigits(Commaf(f), decimals)
-}
-
-// BigComma produces a string form of the given big.Int in base 10
-// with commas after every three orders of magnitude.
-func BigComma(b *big.Int) string {
- sign := ""
- if b.Sign() < 0 {
- sign = "-"
- b.Abs(b)
- }
-
- athousand := big.NewInt(1000)
- c := (&big.Int{}).Set(b)
- _, m := oom(c, athousand)
- parts := make([]string, m+1)
- j := len(parts) - 1
-
- mod := &big.Int{}
- for b.Cmp(athousand) >= 0 {
- b.DivMod(b, athousand, mod)
- parts[j] = strconv.FormatInt(mod.Int64(), 10)
- switch len(parts[j]) {
- case 2:
- parts[j] = "0" + parts[j]
- case 1:
- parts[j] = "00" + parts[j]
- }
- j--
- }
- parts[j] = strconv.Itoa(int(b.Int64()))
- return sign + strings.Join(parts[j:], ",")
-}
diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go
deleted file mode 100644
index 620690de..00000000
--- a/vendor/github.com/dustin/go-humanize/commaf.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// +build go1.6
-
-package humanize
-
-import (
- "bytes"
- "math/big"
- "strings"
-)
-
-// BigCommaf produces a string form of the given big.Float in base 10
-// with commas after every three orders of magnitude.
-func BigCommaf(v *big.Float) string {
- buf := &bytes.Buffer{}
- if v.Sign() < 0 {
- buf.Write([]byte{'-'})
- v.Abs(v)
- }
-
- comma := []byte{','}
-
- parts := strings.Split(v.Text('f', -1), ".")
- pos := 0
- if len(parts[0])%3 != 0 {
- pos += len(parts[0]) % 3
- buf.WriteString(parts[0][:pos])
- buf.Write(comma)
- }
- for ; pos < len(parts[0]); pos += 3 {
- buf.WriteString(parts[0][pos : pos+3])
- buf.Write(comma)
- }
- buf.Truncate(buf.Len() - 1)
-
- if len(parts) > 1 {
- buf.Write([]byte{'.'})
- buf.WriteString(parts[1])
- }
- return buf.String()
-}
diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go
deleted file mode 100644
index 1c62b640..00000000
--- a/vendor/github.com/dustin/go-humanize/ftoa.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package humanize
-
-import (
- "strconv"
- "strings"
-)
-
-func stripTrailingZeros(s string) string {
- offset := len(s) - 1
- for offset > 0 {
- if s[offset] == '.' {
- offset--
- break
- }
- if s[offset] != '0' {
- break
- }
- offset--
- }
- return s[:offset+1]
-}
-
-func stripTrailingDigits(s string, digits int) string {
- if i := strings.Index(s, "."); i >= 0 {
- if digits <= 0 {
- return s[:i]
- }
- i++
- if i+digits >= len(s) {
- return s
- }
- return s[:i+digits]
- }
- return s
-}
-
-// Ftoa converts a float to a string with no trailing zeros.
-func Ftoa(num float64) string {
- return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64))
-}
-
-// FtoaWithDigits converts a float to a string but limits the resulting string
-// to the given number of decimal places, and no trailing zeros.
-func FtoaWithDigits(num float64, digits int) string {
- return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits))
-}
diff --git a/vendor/github.com/dustin/go-humanize/humanize.go b/vendor/github.com/dustin/go-humanize/humanize.go
deleted file mode 100644
index a2c2da31..00000000
--- a/vendor/github.com/dustin/go-humanize/humanize.go
+++ /dev/null
@@ -1,8 +0,0 @@
-/*
-Package humanize converts boring ugly numbers to human-friendly strings and back.
-
-Durations can be turned into strings such as "3 days ago", numbers
-representing sizes like 82854982 into useful strings like, "83 MB" or
-"79 MiB" (whichever you prefer).
-*/
-package humanize
diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go
deleted file mode 100644
index dec61865..00000000
--- a/vendor/github.com/dustin/go-humanize/number.go
+++ /dev/null
@@ -1,192 +0,0 @@
-package humanize
-
-/*
-Slightly adapted from the source to fit go-humanize.
-
-Author: https://github.com/gorhill
-Source: https://gist.github.com/gorhill/5285193
-
-*/
-
-import (
- "math"
- "strconv"
-)
-
-var (
- renderFloatPrecisionMultipliers = [...]float64{
- 1,
- 10,
- 100,
- 1000,
- 10000,
- 100000,
- 1000000,
- 10000000,
- 100000000,
- 1000000000,
- }
-
- renderFloatPrecisionRounders = [...]float64{
- 0.5,
- 0.05,
- 0.005,
- 0.0005,
- 0.00005,
- 0.000005,
- 0.0000005,
- 0.00000005,
- 0.000000005,
- 0.0000000005,
- }
-)
-
-// FormatFloat produces a formatted number as string based on the following user-specified criteria:
-// * thousands separator
-// * decimal separator
-// * decimal precision
-//
-// Usage: s := RenderFloat(format, n)
-// The format parameter tells how to render the number n.
-//
-// See examples: http://play.golang.org/p/LXc1Ddm1lJ
-//
-// Examples of format strings, given n = 12345.6789:
-// "#,###.##" => "12,345.67"
-// "#,###." => "12,345"
-// "#,###" => "12345,678"
-// "#\u202F###,##" => "12 345,68"
-// "#.###,###### => 12.345,678900
-// "" (aka default format) => 12,345.67
-//
-// The highest precision allowed is 9 digits after the decimal symbol.
-// There is also a version for integer number, FormatInteger(),
-// which is convenient for calls within template.
-func FormatFloat(format string, n float64) string {
- // Special cases:
- // NaN = "NaN"
- // +Inf = "+Infinity"
- // -Inf = "-Infinity"
- if math.IsNaN(n) {
- return "NaN"
- }
- if n > math.MaxFloat64 {
- return "Infinity"
- }
- if n < -math.MaxFloat64 {
- return "-Infinity"
- }
-
- // default format
- precision := 2
- decimalStr := "."
- thousandStr := ","
- positiveStr := ""
- negativeStr := "-"
-
- if len(format) > 0 {
- format := []rune(format)
-
- // If there is an explicit format directive,
- // then default values are these:
- precision = 9
- thousandStr = ""
-
- // collect indices of meaningful formatting directives
- formatIndx := []int{}
- for i, char := range format {
- if char != '#' && char != '0' {
- formatIndx = append(formatIndx, i)
- }
- }
-
- if len(formatIndx) > 0 {
- // Directive at index 0:
- // Must be a '+'
- // Raise an error if not the case
- // index: 0123456789
- // +0.000,000
- // +000,000.0
- // +0000.00
- // +0000
- if formatIndx[0] == 0 {
- if format[formatIndx[0]] != '+' {
- panic("RenderFloat(): invalid positive sign directive")
- }
- positiveStr = "+"
- formatIndx = formatIndx[1:]
- }
-
- // Two directives:
- // First is thousands separator
- // Raise an error if not followed by 3-digit
- // 0123456789
- // 0.000,000
- // 000,000.00
- if len(formatIndx) == 2 {
- if (formatIndx[1] - formatIndx[0]) != 4 {
- panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers")
- }
- thousandStr = string(format[formatIndx[0]])
- formatIndx = formatIndx[1:]
- }
-
- // One directive:
- // Directive is decimal separator
- // The number of digit-specifier following the separator indicates wanted precision
- // 0123456789
- // 0.00
- // 000,0000
- if len(formatIndx) == 1 {
- decimalStr = string(format[formatIndx[0]])
- precision = len(format) - formatIndx[0] - 1
- }
- }
- }
-
- // generate sign part
- var signStr string
- if n >= 0.000000001 {
- signStr = positiveStr
- } else if n <= -0.000000001 {
- signStr = negativeStr
- n = -n
- } else {
- signStr = ""
- n = 0.0
- }
-
- // split number into integer and fractional parts
- intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision])
-
- // generate integer part string
- intStr := strconv.FormatInt(int64(intf), 10)
-
- // add thousand separator if required
- if len(thousandStr) > 0 {
- for i := len(intStr); i > 3; {
- i -= 3
- intStr = intStr[:i] + thousandStr + intStr[i:]
- }
- }
-
- // no fractional part, we can leave now
- if precision == 0 {
- return signStr + intStr
- }
-
- // generate fractional part
- fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision]))
- // may need padding
- if len(fracStr) < precision {
- fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr
- }
-
- return signStr + intStr + decimalStr + fracStr
-}
-
-// FormatInteger produces a formatted number as string.
-// See FormatFloat.
-func FormatInteger(format string, n int) string {
- return FormatFloat(format, float64(n))
-}
diff --git a/vendor/github.com/dustin/go-humanize/ordinals.go b/vendor/github.com/dustin/go-humanize/ordinals.go
deleted file mode 100644
index 43d88a86..00000000
--- a/vendor/github.com/dustin/go-humanize/ordinals.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package humanize
-
-import "strconv"
-
-// Ordinal gives you the input number in a rank/ordinal format.
-//
-// Ordinal(3) -> 3rd
-func Ordinal(x int) string {
- suffix := "th"
- switch x % 10 {
- case 1:
- if x%100 != 11 {
- suffix = "st"
- }
- case 2:
- if x%100 != 12 {
- suffix = "nd"
- }
- case 3:
- if x%100 != 13 {
- suffix = "rd"
- }
- }
- return strconv.Itoa(x) + suffix
-}
diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go
deleted file mode 100644
index ae659e0e..00000000
--- a/vendor/github.com/dustin/go-humanize/si.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package humanize
-
-import (
- "errors"
- "math"
- "regexp"
- "strconv"
-)
-
-var siPrefixTable = map[float64]string{
- -24: "y", // yocto
- -21: "z", // zepto
- -18: "a", // atto
- -15: "f", // femto
- -12: "p", // pico
- -9: "n", // nano
- -6: "µ", // micro
- -3: "m", // milli
- 0: "",
- 3: "k", // kilo
- 6: "M", // mega
- 9: "G", // giga
- 12: "T", // tera
- 15: "P", // peta
- 18: "E", // exa
- 21: "Z", // zetta
- 24: "Y", // yotta
-}
-
-var revSIPrefixTable = revfmap(siPrefixTable)
-
-// revfmap reverses the map and precomputes the power multiplier
-func revfmap(in map[float64]string) map[string]float64 {
- rv := map[string]float64{}
- for k, v := range in {
- rv[v] = math.Pow(10, k)
- }
- return rv
-}
-
-var riParseRegex *regexp.Regexp
-
-func init() {
- ri := `^([\-0-9.]+)\s?([`
- for _, v := range siPrefixTable {
- ri += v
- }
- ri += `]?)(.*)`
-
- riParseRegex = regexp.MustCompile(ri)
-}
-
-// ComputeSI finds the most appropriate SI prefix for the given number
-// and returns the prefix along with the value adjusted to be within
-// that prefix.
-//
-// See also: SI, ParseSI.
-//
-// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p")
-func ComputeSI(input float64) (float64, string) {
- if input == 0 {
- return 0, ""
- }
- mag := math.Abs(input)
- exponent := math.Floor(logn(mag, 10))
- exponent = math.Floor(exponent/3) * 3
-
- value := mag / math.Pow(10, exponent)
-
- // Handle special case where value is exactly 1000.0
- // Should return 1 M instead of 1000 k
- if value == 1000.0 {
- exponent += 3
- value = mag / math.Pow(10, exponent)
- }
-
- value = math.Copysign(value, input)
-
- prefix := siPrefixTable[exponent]
- return value, prefix
-}
-
-// SI returns a string with default formatting.
-//
-// SI uses Ftoa to format float value, removing trailing zeros.
-//
-// See also: ComputeSI, ParseSI.
-//
-// e.g. SI(1000000, "B") -> 1 MB
-// e.g. SI(2.2345e-12, "F") -> 2.2345 pF
-func SI(input float64, unit string) string {
- value, prefix := ComputeSI(input)
- return Ftoa(value) + " " + prefix + unit
-}
-
-// SIWithDigits works like SI but limits the resulting string to the
-// given number of decimal places.
-//
-// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB
-// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF
-func SIWithDigits(input float64, decimals int, unit string) string {
- value, prefix := ComputeSI(input)
- return FtoaWithDigits(value, decimals) + " " + prefix + unit
-}
-
-var errInvalid = errors.New("invalid input")
-
-// ParseSI parses an SI string back into the number and unit.
-//
-// See also: SI, ComputeSI.
-//
-// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil)
-func ParseSI(input string) (float64, string, error) {
- found := riParseRegex.FindStringSubmatch(input)
- if len(found) != 4 {
- return 0, "", errInvalid
- }
- mag := revSIPrefixTable[found[2]]
- unit := found[3]
-
- base, err := strconv.ParseFloat(found[1], 64)
- return base * mag, unit, err
-}
diff --git a/vendor/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go
deleted file mode 100644
index dd3fbf5e..00000000
--- a/vendor/github.com/dustin/go-humanize/times.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package humanize
-
-import (
- "fmt"
- "math"
- "sort"
- "time"
-)
-
-// Seconds-based time units
-const (
- Day = 24 * time.Hour
- Week = 7 * Day
- Month = 30 * Day
- Year = 12 * Month
- LongTime = 37 * Year
-)
-
-// Time formats a time into a relative string.
-//
-// Time(someT) -> "3 weeks ago"
-func Time(then time.Time) string {
- return RelTime(then, time.Now(), "ago", "from now")
-}
-
-// A RelTimeMagnitude struct contains a relative time point at which
-// the relative format of time will switch to a new format string. A
-// slice of these in ascending order by their "D" field is passed to
-// CustomRelTime to format durations.
-//
-// The Format field is a string that may contain a "%s" which will be
-// replaced with the appropriate signed label (e.g. "ago" or "from
-// now") and a "%d" that will be replaced by the quantity.
-//
-// The DivBy field is the amount of time the time difference must be
-// divided by in order to display correctly.
-//
-// e.g. if D is 2*time.Minute and you want to display "%d minutes %s"
-// DivBy should be time.Minute so whatever the duration is will be
-// expressed in minutes.
-type RelTimeMagnitude struct {
- D time.Duration
- Format string
- DivBy time.Duration
-}
-
-var defaultMagnitudes = []RelTimeMagnitude{
- {time.Second, "now", time.Second},
- {2 * time.Second, "1 second %s", 1},
- {time.Minute, "%d seconds %s", time.Second},
- {2 * time.Minute, "1 minute %s", 1},
- {time.Hour, "%d minutes %s", time.Minute},
- {2 * time.Hour, "1 hour %s", 1},
- {Day, "%d hours %s", time.Hour},
- {2 * Day, "1 day %s", 1},
- {Week, "%d days %s", Day},
- {2 * Week, "1 week %s", 1},
- {Month, "%d weeks %s", Week},
- {2 * Month, "1 month %s", 1},
- {Year, "%d months %s", Month},
- {18 * Month, "1 year %s", 1},
- {2 * Year, "2 years %s", 1},
- {LongTime, "%d years %s", Year},
- {math.MaxInt64, "a long while %s", 1},
-}
-
-// RelTime formats a time into a relative string.
-//
-// It takes two times and two labels. In addition to the generic time
-// delta string (e.g. 5 minutes), the labels are used applied so that
-// the label corresponding to the smaller time is applied.
-//
-// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier"
-func RelTime(a, b time.Time, albl, blbl string) string {
- return CustomRelTime(a, b, albl, blbl, defaultMagnitudes)
-}
-
-// CustomRelTime formats a time into a relative string.
-//
-// It takes two times two labels and a table of relative time formats.
-// In addition to the generic time delta string (e.g. 5 minutes), the
-// labels are used applied so that the label corresponding to the
-// smaller time is applied.
-func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string {
- lbl := albl
- diff := b.Sub(a)
-
- if a.After(b) {
- lbl = blbl
- diff = a.Sub(b)
- }
-
- n := sort.Search(len(magnitudes), func(i int) bool {
- return magnitudes[i].D > diff
- })
-
- if n >= len(magnitudes) {
- n = len(magnitudes) - 1
- }
- mag := magnitudes[n]
- args := []interface{}{}
- escaped := false
- for _, ch := range mag.Format {
- if escaped {
- switch ch {
- case 's':
- args = append(args, lbl)
- case 'd':
- args = append(args, diff/mag.DivBy)
- }
- escaped = false
- } else {
- escaped = ch == '%'
- }
- }
- return fmt.Sprintf(mag.Format, args...)
-}
diff --git a/vendor/github.com/dyatlov/go-opengraph/LICENSE b/vendor/github.com/dyatlov/go-opengraph/opengraph/LICENSE
similarity index 100%
rename from vendor/github.com/dyatlov/go-opengraph/LICENSE
rename to vendor/github.com/dyatlov/go-opengraph/opengraph/LICENSE
diff --git a/vendor/github.com/dyatlov/go-opengraph/opengraph/opengraph.go b/vendor/github.com/dyatlov/go-opengraph/opengraph/opengraph.go
index fa18673a..089d670a 100644
--- a/vendor/github.com/dyatlov/go-opengraph/opengraph/opengraph.go
+++ b/vendor/github.com/dyatlov/go-opengraph/opengraph/opengraph.go
@@ -8,81 +8,37 @@ import (
"golang.org/x/net/html"
"golang.org/x/net/html/atom"
-)
-
-// Image defines Open Graph Image type
-type Image struct {
- URL string `json:"url"`
- SecureURL string `json:"secure_url"`
- Type string `json:"type"`
- Width uint64 `json:"width"`
- Height uint64 `json:"height"`
- draft bool `json:"-"`
-}
-
-// Video defines Open Graph Video type
-type Video struct {
- URL string `json:"url"`
- SecureURL string `json:"secure_url"`
- Type string `json:"type"`
- Width uint64 `json:"width"`
- Height uint64 `json:"height"`
- draft bool `json:"-"`
-}
-
-// Audio defines Open Graph Audio Type
-type Audio struct {
- URL string `json:"url"`
- SecureURL string `json:"secure_url"`
- Type string `json:"type"`
- draft bool `json:"-"`
-}
-
-// Article contain Open Graph Article structure
-type Article struct {
- PublishedTime *time.Time `json:"published_time"`
- ModifiedTime *time.Time `json:"modified_time"`
- ExpirationTime *time.Time `json:"expiration_time"`
- Section string `json:"section"`
- Tags []string `json:"tags"`
- Authors []*Profile `json:"authors"`
-}
-
-// Profile contains Open Graph Profile structure
-type Profile struct {
- FirstName string `json:"first_name"`
- LastName string `json:"last_name"`
- Username string `json:"username"`
- Gender string `json:"gender"`
-}
-// Book contains Open Graph Book structure
-type Book struct {
- ISBN string `json:"isbn"`
- ReleaseDate *time.Time `json:"release_date"`
- Tags []string `json:"tags"`
- Authors []*Profile `json:"authors"`
-}
+ "github.com/dyatlov/go-opengraph/opengraph/types/actor"
+ "github.com/dyatlov/go-opengraph/opengraph/types/article"
+ "github.com/dyatlov/go-opengraph/opengraph/types/audio"
+ "github.com/dyatlov/go-opengraph/opengraph/types/book"
+ "github.com/dyatlov/go-opengraph/opengraph/types/image"
+ "github.com/dyatlov/go-opengraph/opengraph/types/music"
+ "github.com/dyatlov/go-opengraph/opengraph/types/profile"
+ "github.com/dyatlov/go-opengraph/opengraph/types/video"
+)
// OpenGraph contains facebook og data
type OpenGraph struct {
isArticle bool
isBook bool
isProfile bool
- Type string `json:"type"`
- URL string `json:"url"`
- Title string `json:"title"`
- Description string `json:"description"`
- Determiner string `json:"determiner"`
- SiteName string `json:"site_name"`
- Locale string `json:"locale"`
- LocalesAlternate []string `json:"locales_alternate"`
- Images []*Image `json:"images"`
- Audios []*Audio `json:"audios"`
- Videos []*Video `json:"videos"`
- Article *Article `json:"article,omitempty"`
- Book *Book `json:"book,omitempty"`
- Profile *Profile `json:"profile,omitempty"`
+ Type string `json:"type"`
+ URL string `json:"url"`
+ Title string `json:"title"`
+ Description string `json:"description"`
+ Determiner string `json:"determiner"`
+ SiteName string `json:"site_name"`
+ Locale string `json:"locale"`
+ LocalesAlternate []string `json:"locales_alternate"`
+ Images []*image.Image `json:"images"`
+ Audios []*audio.Audio `json:"audios"`
+ Videos []*video.Video `json:"videos"`
+ Article *article.Article `json:"article,omitempty"`
+ Book *book.Book `json:"book,omitempty"`
+ Profile *profile.Profile `json:"profile,omitempty"`
+ Music *music.Music `json:"music,omitempty"`
}
// NewOpenGraph returns new instance of Open Graph structure
@@ -137,21 +93,13 @@ func (og *OpenGraph) ensureHasVideo() {
if len(og.Videos) > 0 {
return
}
- og.Videos = append(og.Videos, &Video{draft: true})
-}
-
-func (og *OpenGraph) ensureHasImage() {
- if len(og.Images) > 0 {
- return
- }
- og.Images = append(og.Images, &Image{draft: true})
+ og.Videos = append(og.Videos, video.NewVideo())
}
-func (og *OpenGraph) ensureHasAudio() {
- if len(og.Audios) > 0 {
- return
+func (og *OpenGraph) ensureHasMusic() {
+ if og.Music == nil {
+ og.Music = music.NewMusic()
}
- og.Audios = append(og.Audios, &Audio{draft: true})
}
// ProcessMeta processes meta attributes and adds them to Open Graph structure if they are suitable for that
@@ -182,73 +130,110 @@ func (og *OpenGraph) ProcessMeta(metaAttrs map[string]string) {
case "og:locale:alternate":
og.LocalesAlternate = append(og.LocalesAlternate, metaAttrs["content"])
case "og:audio":
- if len(og.Audios)>0 && og.Audios[len(og.Audios)-1].draft {
- og.Audios[len(og.Audios)-1].URL = metaAttrs["content"]
- og.Audios[len(og.Audios)-1].draft = false
- } else {
- og.Audios = append(og.Audios, &Audio{URL: metaAttrs["content"]})
- }
+ og.Audios = audio.AddUrl(og.Audios, metaAttrs["content"])
case "og:audio:secure_url":
- og.ensureHasAudio()
- og.Audios[len(og.Audios)-1].SecureURL = metaAttrs["content"]
+ og.Audios = audio.AddSecureUrl(og.Audios, metaAttrs["content"])
case "og:audio:type":
- og.ensureHasAudio()
- og.Audios[len(og.Audios)-1].Type = metaAttrs["content"]
+ og.Audios = audio.AddType(og.Audios, metaAttrs["content"])
case "og:image":
- if len(og.Images)>0 && og.Images[len(og.Images)-1].draft {
- og.Images[len(og.Images)-1].URL = metaAttrs["content"]
- og.Images[len(og.Images)-1].draft = false
- } else {
- og.Images = append(og.Images, &Image{URL: metaAttrs["content"]})
- }
+ og.Images = image.AddURL(og.Images, metaAttrs["content"])
case "og:image:url":
- og.ensureHasImage()
- og.Images[len(og.Images)-1].URL = metaAttrs["content"]
+ og.Images = image.AddURL(og.Images, metaAttrs["content"])
case "og:image:secure_url":
- og.ensureHasImage()
- og.Images[len(og.Images)-1].SecureURL = metaAttrs["content"]
+ og.Images = image.AddSecureURL(og.Images, metaAttrs["content"])
case "og:image:type":
- og.ensureHasImage()
- og.Images[len(og.Images)-1].Type = metaAttrs["content"]
+ og.Images = image.AddType(og.Images, metaAttrs["content"])
case "og:image:width":
w, err := strconv.ParseUint(metaAttrs["content"], 10, 64)
if err == nil {
- og.ensureHasImage()
- og.Images[len(og.Images)-1].Width = w
+ og.Images = image.AddWidth(og.Images, w)
}
case "og:image:height":
h, err := strconv.ParseUint(metaAttrs["content"], 10, 64)
if err == nil {
- og.ensureHasImage()
- og.Images[len(og.Images)-1].Height = h
+ og.Images = image.AddHeight(og.Images, h)
}
case "og:video":
- if len(og.Videos)>0 && og.Videos[len(og.Videos)-1].draft {
- og.Videos[len(og.Videos)-1].URL = metaAttrs["content"]
- og.Videos[len(og.Videos)-1].draft = false
- } else {
- og.Videos = append(og.Videos, &Video{URL: metaAttrs["content"]})
+ og.Videos = video.AddURL(og.Videos, metaAttrs["content"])
+ case "og:video:tag":
+ og.Videos = video.AddTag(og.Videos, metaAttrs["content"])
+ case "og:video:duration":
+ if i, err := strconv.ParseUint(metaAttrs["content"], 10, 64); err == nil {
+ og.Videos = video.AddDuration(og.Videos, i)
+ }
+ case "og:video:release_date":
+ if t, err := time.Parse(time.RFC3339, metaAttrs["content"]); err == nil {
+ og.Videos = video.AddReleaseDate(og.Videos, &t)
}
case "og:video:url":
- og.ensureHasVideo()
- og.Videos[len(og.Videos)-1].URL = metaAttrs["content"]
+ og.Videos = video.AddURL(og.Videos, metaAttrs["content"])
case "og:video:secure_url":
- og.ensureHasVideo()
- og.Videos[len(og.Videos)-1].SecureURL = metaAttrs["content"]
+ og.Videos = video.AddSecureURL(og.Videos, metaAttrs["content"])
case "og:video:type":
- og.ensureHasVideo()
- og.Videos[len(og.Videos)-1].Type = metaAttrs["content"]
+ og.Videos = video.AddTag(og.Videos, metaAttrs["content"])
case "og:video:width":
w, err := strconv.ParseUint(metaAttrs["content"], 10, 64)
if err == nil {
- og.ensureHasVideo()
- og.Videos[len(og.Videos)-1].Width = w
+ og.Videos = video.AddWidth(og.Videos, w)
}
case "og:video:height":
h, err := strconv.ParseUint(metaAttrs["content"], 10, 64)
if err == nil {
- og.ensureHasVideo()
- og.Videos[len(og.Videos)-1].Height = h
+ og.Videos = video.AddHeight(og.Videos, h)
+ }
+ case "og:video:actor":
+ og.ensureHasVideo()
+ og.Videos[len(og.Videos)-1].Actors = actor.AddProfile(og.Videos[len(og.Videos)-1].Actors, metaAttrs["content"])
+ case "og:video:actor:role":
+ og.ensureHasVideo()
+ og.Videos[len(og.Videos)-1].Actors = actor.AddRole(og.Videos[len(og.Videos)-1].Actors, metaAttrs["content"])
+ case "og:video:director":
+ og.ensureHasVideo()
+ og.Videos[len(og.Videos)-1].Directors = append(og.Videos[len(og.Videos)-1].Directors, metaAttrs["content"])
+ case "og:video:writer":
+ og.ensureHasVideo()
+ og.Videos[len(og.Videos)-1].Writers = append(og.Videos[len(og.Videos)-1].Writers, metaAttrs["content"])
+ case "og:music:duration":
+ og.ensureHasMusic()
+ if i, err := strconv.ParseUint(metaAttrs["content"], 10, 64); err == nil {
+ og.Music.Duration = i
+ }
+ case "og:music:release_date":
+ og.ensureHasMusic()
+ if t, err := time.Parse(time.RFC3339, metaAttrs["content"]); err == nil {
+ og.Music.ReleaseDate = &t
+ }
+ case "og:music:album":
+ og.ensureHasMusic()
+ og.Music.Album.URL = metaAttrs["content"]
+ case "og:music:album:disc":
+ og.ensureHasMusic()
+ if i, err := strconv.ParseUint(metaAttrs["content"], 10, 64); err == nil {
+ og.Music.Album.Disc = i
+ }
+ case "og:music:album:track":
+ og.ensureHasMusic()
+ if i, err := strconv.ParseUint(metaAttrs["content"], 10, 64); err == nil {
+ og.Music.Album.Track = i
+ }
+ case "og:music:musician":
+ og.ensureHasMusic()
+ og.Music.Musicians = append(og.Music.Musicians, metaAttrs["content"])
+ case "og:music:creator":
+ og.ensureHasMusic()
+ og.Music.Creators = append(og.Music.Creators, metaAttrs["content"])
+ case "og:music:song":
+ og.ensureHasMusic()
+ og.Music.AddSongUrl(metaAttrs["content"])
+ case "og:music:disc":
+ og.ensureHasMusic()
+ if i, err := strconv.ParseUint(metaAttrs["content"], 10, 64); err == nil {
+ og.Music.AddSongDisc(i)
+ }
+ case "og:music:track":
+ og.ensureHasMusic()
+ if i, err := strconv.ParseUint(metaAttrs["content"], 10, 64); err == nil {
+ og.Music.AddSongTrack(i)
}
default:
if og.isArticle {
@@ -263,100 +248,64 @@ func (og *OpenGraph) ProcessMeta(metaAttrs map[string]string) {
func (og *OpenGraph) processArticleMeta(metaAttrs map[string]string) {
if og.Article == nil {
- og.Article = &Article{}
+ og.Article = &article.Article{}
}
switch metaAttrs["property"] {
- case "article:published_time":
+ case "og:article:published_time":
t, err := time.Parse(time.RFC3339, metaAttrs["content"])
if err == nil {
og.Article.PublishedTime = &t
}
- case "article:modified_time":
+ case "og:article:modified_time":
t, err := time.Parse(time.RFC3339, metaAttrs["content"])
if err == nil {
og.Article.ModifiedTime = &t
}
- case "article:expiration_time":
+ case "og:article:expiration_time":
t, err := time.Parse(time.RFC3339, metaAttrs["content"])
if err == nil {
og.Article.ExpirationTime = &t
}
- case "article:section":
+ case "og:article:section":
og.Article.Section = metaAttrs["content"]
- case "article:tag":
+ case "og:article:tag":
og.Article.Tags = append(og.Article.Tags, metaAttrs["content"])
- case "article:author:first_name":
- if len(og.Article.Authors) == 0 {
- og.Article.Authors = append(og.Article.Authors, &Profile{})
- }
- og.Article.Authors[len(og.Article.Authors)-1].FirstName = metaAttrs["content"]
- case "article:author:last_name":
- if len(og.Article.Authors) == 0 {
- og.Article.Authors = append(og.Article.Authors, &Profile{})
- }
- og.Article.Authors[len(og.Article.Authors)-1].LastName = metaAttrs["content"]
- case "article:author:username":
- if len(og.Article.Authors) == 0 {
- og.Article.Authors = append(og.Article.Authors, &Profile{})
- }
- og.Article.Authors[len(og.Article.Authors)-1].Username = metaAttrs["content"]
- case "article:author:gender":
- if len(og.Article.Authors) == 0 {
- og.Article.Authors = append(og.Article.Authors, &Profile{})
- }
- og.Article.Authors[len(og.Article.Authors)-1].Gender = metaAttrs["content"]
+ case "og:article:author":
+ og.Article.Authors = append(og.Article.Authors, metaAttrs["content"])
}
}
func (og *OpenGraph) processBookMeta(metaAttrs map[string]string) {
if og.Book == nil {
- og.Book = &Book{}
+ og.Book = &book.Book{}
}
switch metaAttrs["property"] {
- case "book:release_date":
+ case "og:book:release_date":
t, err := time.Parse(time.RFC3339, metaAttrs["content"])
if err == nil {
og.Book.ReleaseDate = &t
}
- case "book:isbn":
+ case "og:book:isbn":
og.Book.ISBN = metaAttrs["content"]
- case "book:tag":
+ case "og:book:tag":
og.Book.Tags = append(og.Book.Tags, metaAttrs["content"])
- case "book:author:first_name":
- if len(og.Book.Authors) == 0 {
- og.Book.Authors = append(og.Book.Authors, &Profile{})
- }
- og.Book.Authors[len(og.Book.Authors)-1].FirstName = metaAttrs["content"]
- case "book:author:last_name":
- if len(og.Book.Authors) == 0 {
- og.Book.Authors = append(og.Book.Authors, &Profile{})
- }
- og.Book.Authors[len(og.Book.Authors)-1].LastName = metaAttrs["content"]
- case "book:author:username":
- if len(og.Book.Authors) == 0 {
- og.Book.Authors = append(og.Book.Authors, &Profile{})
- }
- og.Book.Authors[len(og.Book.Authors)-1].Username = metaAttrs["content"]
- case "book:author:gender":
- if len(og.Book.Authors) == 0 {
- og.Book.Authors = append(og.Book.Authors, &Profile{})
- }
- og.Book.Authors[len(og.Book.Authors)-1].Gender = metaAttrs["content"]
+ case "og:book:author":
+ og.Book.Authors = append(og.Book.Authors, metaAttrs["content"])
}
}
func (og *OpenGraph) processProfileMeta(metaAttrs map[string]string) {
if og.Profile == nil {
- og.Profile = &Profile{}
+ og.Profile = &profile.Profile{}
}
switch metaAttrs["property"] {
- case "profile:first_name":
+ case "og:profile:first_name":
og.Profile.FirstName = metaAttrs["content"]
- case "profile:last_name":
+ case "og:profile:last_name":
og.Profile.LastName = metaAttrs["content"]
- case "profile:username":
+ case "og:profile:username":
og.Profile.Username = metaAttrs["content"]
- case "profile:gender":
+ case "og:profile:gender":
og.Profile.Gender = metaAttrs["content"]
}
}
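
For context (not part of the diff): a small sketch of how `ProcessMeta` is driven after this refactor — each meta tag is fed in as a property/content pair, and the new per-type helper packages accumulate the values. Only calls visible in the hunks above are used; the URLs are illustrative.

```go
package main

import (
	"fmt"

	"github.com/dyatlov/go-opengraph/opengraph"
)

func main() {
	og := opengraph.NewOpenGraph()

	// og:image starts (or fills) an image entry; og:image:width fills the last one.
	og.ProcessMeta(map[string]string{"property": "og:image", "content": "https://example.com/a.png"})
	og.ProcessMeta(map[string]string{"property": "og:image:width", "content": "800"})

	fmt.Println(og.Images[0].URL, og.Images[0].Width) // https://example.com/a.png 800
}
```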
diff --git a/vendor/github.com/dyatlov/go-opengraph/opengraph/types/actor/actor.go b/vendor/github.com/dyatlov/go-opengraph/opengraph/types/actor/actor.go
new file mode 100644
index 00000000..ed9b9953
--- /dev/null
+++ b/vendor/github.com/dyatlov/go-opengraph/opengraph/types/actor/actor.go
@@ -0,0 +1,27 @@
+package actor
+
+// Actor contain Open Graph Actor structure
+type Actor struct {
+ Profile string `json:"profile"`
+ Role string `json:"role"`
+}
+
+func NewActor() *Actor {
+ return &Actor{}
+}
+
+func AddProfile(actors []*Actor, v string) []*Actor {
+ if len(actors) == 0 || actors[len(actors)-1].Profile != "" {
+ actors = append(actors, &Actor{})
+ }
+ actors[len(actors)-1].Profile = v
+ return actors
+}
+
+func AddRole(actors []*Actor, v string) []*Actor {
+ if len(actors) == 0 || actors[len(actors)-1].Role != "" {
+ actors = append(actors, &Actor{})
+ }
+ actors[len(actors)-1].Role = v
+ return actors
+}
diff --git a/vendor/github.com/dyatlov/go-opengraph/opengraph/types/article/article.go b/vendor/github.com/dyatlov/go-opengraph/opengraph/types/article/article.go
new file mode 100644
index 00000000..19729bc8
--- /dev/null
+++ b/vendor/github.com/dyatlov/go-opengraph/opengraph/types/article/article.go
@@ -0,0 +1,15 @@
+package article
+
+import (
+ "time"
+)
+
+// Article contain Open Graph Article structure
+type Article struct {
+ PublishedTime *time.Time `json:"published_time"`
+ ModifiedTime *time.Time `json:"modified_time"`
+ ExpirationTime *time.Time `json:"expiration_time"`
+ Section string `json:"section"`
+ Tags []string `json:"tags"`
+ Authors []string `json:"authors"`
+}
diff --git a/vendor/github.com/dyatlov/go-opengraph/opengraph/types/audio/audio.go b/vendor/github.com/dyatlov/go-opengraph/opengraph/types/audio/audio.go
new file mode 100644
index 00000000..094d5a24
--- /dev/null
+++ b/vendor/github.com/dyatlov/go-opengraph/opengraph/types/audio/audio.go
@@ -0,0 +1,36 @@
+package audio
+
+// Audio defines Open Graph Audio Type
+type Audio struct {
+ URL string `json:"url"`
+ SecureURL string `json:"secure_url"`
+ Type string `json:"type"`
+}
+
+func NewAudio() *Audio {
+ return &Audio{}
+}
+
+func AddUrl(audios []*Audio, v string) []*Audio {
+ if len(audios) == 0 || audios[len(audios)-1].URL != "" {
+ audios = append(audios, &Audio{})
+ }
+ audios[len(audios)-1].URL = v
+ return audios
+}
+
+func AddSecureUrl(audios []*Audio, v string) []*Audio {
+ if len(audios) == 0 || audios[len(audios)-1].SecureURL != "" {
+ audios = append(audios, &Audio{})
+ }
+ audios[len(audios)-1].SecureURL = v
+ return audios
+}
+
+func AddType(audios []*Audio, v string) []*Audio {
+ if len(audios) == 0 || audios[len(audios)-1].Type != "" {
+ audios = append(audios, &Audio{})
+ }
+ audios[len(audios)-1].Type = v
+ return audios
+}
diff --git a/vendor/github.com/dyatlov/go-opengraph/opengraph/types/book/book.go b/vendor/github.com/dyatlov/go-opengraph/opengraph/types/book/book.go
new file mode 100644
index 00000000..15145f64
--- /dev/null
+++ b/vendor/github.com/dyatlov/go-opengraph/opengraph/types/book/book.go
@@ -0,0 +1,13 @@
+package book
+
+import (
+ "time"
+)
+
+// Book contains Open Graph Book structure
+type Book struct {
+ ISBN string `json:"isbn"`
+ ReleaseDate *time.Time `json:"release_date"`
+ Tags []string `json:"tags"`
+ Authors []string `json:"authors"`
+}
diff --git a/vendor/github.com/dyatlov/go-opengraph/opengraph/types/image/image.go b/vendor/github.com/dyatlov/go-opengraph/opengraph/types/image/image.go
new file mode 100644
index 00000000..06892654
--- /dev/null
+++ b/vendor/github.com/dyatlov/go-opengraph/opengraph/types/image/image.go
@@ -0,0 +1,53 @@
+package image
+
+// Image defines Open Graph Image type
+type Image struct {
+ URL string `json:"url"`
+ SecureURL string `json:"secure_url"`
+ Type string `json:"type"`
+ Width uint64 `json:"width"`
+ Height uint64 `json:"height"`
+}
+
+func NewImage() *Image {
+ return &Image{}
+}
+
+func ensureHasImage(images []*Image) []*Image {
+ if len(images) == 0 {
+ images = append(images, NewImage())
+ }
+ return images
+}
+
+func AddURL(images []*Image, v string) []*Image {
+ if len(images) == 0 || (images[len(images)-1].URL != "" && images[len(images)-1].URL != v) {
+ images = append(images, NewImage())
+ }
+ images[len(images)-1].URL = v
+ return images
+}
+
+func AddSecureURL(images []*Image, v string) []*Image {
+ images = ensureHasImage(images)
+ images[len(images)-1].SecureURL = v
+ return images
+}
+
+func AddType(images []*Image, v string) []*Image {
+ images = ensureHasImage(images)
+ images[len(images)-1].Type = v
+ return images
+}
+
+func AddWidth(images []*Image, v uint64) []*Image {
+ images = ensureHasImage(images)
+ images[len(images)-1].Width = v
+ return images
+}
+
+func AddHeight(images []*Image, v uint64) []*Image {
+ images = ensureHasImage(images)
+ images[len(images)-1].Height = v
+ return images
+}
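
A brief sketch (assuming the vendored `types/image` package exactly as added above) of the append-or-fill behaviour these `Add*` helpers implement, which `ProcessMeta` now relies on instead of the old `draft` flag:

```go
package main

import (
	"fmt"

	"github.com/dyatlov/go-opengraph/opengraph/types/image"
)

func main() {
	var imgs []*image.Image

	// AddURL appends a new entry when the last one already has a URL;
	// the width/height helpers fill in the most recent entry.
	imgs = image.AddURL(imgs, "https://example.com/a.png")
	imgs = image.AddWidth(imgs, 800)
	imgs = image.AddHeight(imgs, 600)
	imgs = image.AddURL(imgs, "https://example.com/b.png") // starts a second image

	for _, img := range imgs {
		fmt.Println(img.URL, img.Width, img.Height)
	}
	// https://example.com/a.png 800 600
	// https://example.com/b.png 0 0
}
```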
diff --git a/vendor/github.com/dyatlov/go-opengraph/opengraph/types/music/music.go b/vendor/github.com/dyatlov/go-opengraph/opengraph/types/music/music.go
new file mode 100644
index 00000000..d2145989
--- /dev/null
+++ b/vendor/github.com/dyatlov/go-opengraph/opengraph/types/music/music.go
@@ -0,0 +1,52 @@
+package music
+
+import (
+ "time"
+)
+
+// Music defines Open Graph Music type
+type Music struct {
+ Musicians []string `json:"musicians,omitempty"`
+ Creators []string `json:"creators,omitempty"`
+ Duration uint64 `json:"duration,omitempty"`
+ ReleaseDate *time.Time `json:"release_date,omitempty"`
+ Album *Album `json:"album"`
+ Songs []*Song `json:"songs"`
+}
+
+type Album struct {
+ URL string `json:"url,omitempty"`
+ Disc uint64 `json:"disc,omitempty"`
+ Track uint64 `json:"track,omitempty"`
+}
+
+type Song struct {
+ URL string `json:"url,omitempty"`
+ Disc uint64 `json:"disc,omitempty"`
+ Track uint64 `json:"track,omitempty"`
+}
+
+func NewMusic() *Music {
+ return &Music{Album: &Album{}}
+}
+
+func (m *Music) AddSongUrl(v string) {
+ if len(m.Songs) == 0 || m.Songs[len(m.Songs)-1].URL != "" {
+ m.Songs = append(m.Songs, &Song{})
+ }
+ m.Songs[len(m.Songs)-1].URL = v
+}
+
+func (m *Music) AddSongDisc(v uint64) {
+ if len(m.Songs) == 0 {
+ m.Songs = append(m.Songs, &Song{})
+ }
+ m.Songs[len(m.Songs)-1].Disc = v
+}
+
+func (m *Music) AddSongTrack(v uint64) {
+ if len(m.Songs) == 0 {
+ m.Songs = append(m.Songs, &Song{})
+ }
+ m.Songs[len(m.Songs)-1].Track = v
+}
diff --git a/vendor/github.com/dyatlov/go-opengraph/opengraph/types/profile/profile.go b/vendor/github.com/dyatlov/go-opengraph/opengraph/types/profile/profile.go
new file mode 100644
index 00000000..f1a78b2f
--- /dev/null
+++ b/vendor/github.com/dyatlov/go-opengraph/opengraph/types/profile/profile.go
@@ -0,0 +1,59 @@
+package profile
+
+import "strings"
+
+// Profile contain Open Graph Profile structure
+type Profile struct {
+ FirstName string `json:"first_name"`
+ LastName string `json:"last_name"`
+ Username string `json:"username"`
+ Gender string `json:"gender"`
+}
+
+func NewProfile() *Profile {
+ return &Profile{}
+}
+
+func AddBasicProfile(profiles []*Profile, v string) []*Profile {
+ parts := strings.SplitN(v, " ", 2)
+ if len(profiles) == 0 || profiles[len(profiles)-1].FirstName != "" {
+ profiles = append(profiles, &Profile{})
+ }
+ profiles[len(profiles)-1].FirstName = parts[0]
+ if len(parts) > 1 {
+ profiles[len(profiles)-1].LastName = parts[1]
+ }
+ return profiles
+}
+
+func AddFirstName(profiles []*Profile, v string) []*Profile {
+ if len(profiles) == 0 || profiles[len(profiles)-1].FirstName != "" {
+ profiles = append(profiles, &Profile{})
+ }
+ profiles[len(profiles)-1].FirstName = v
+ return profiles
+}
+
+func AddLastName(profiles []*Profile, v string) []*Profile {
+ if len(profiles) == 0 || profiles[len(profiles)-1].LastName != "" {
+ profiles = append(profiles, &Profile{})
+ }
+ profiles[len(profiles)-1].LastName = v
+ return profiles
+}
+
+func AddUsername(profiles []*Profile, v string) []*Profile {
+ if len(profiles) == 0 || profiles[len(profiles)-1].Username != "" {
+ profiles = append(profiles, &Profile{})
+ }
+ profiles[len(profiles)-1].Username = v
+ return profiles
+}
+
+func AddGender(profiles []*Profile, v string) []*Profile {
+ if len(profiles) == 0 || profiles[len(profiles)-1].Gender != "" {
+ profiles = append(profiles, &Profile{})
+ }
+ profiles[len(profiles)-1].Gender = v
+ return profiles
+}
diff --git a/vendor/github.com/dyatlov/go-opengraph/opengraph/types/video/video.go b/vendor/github.com/dyatlov/go-opengraph/opengraph/types/video/video.go
new file mode 100644
index 00000000..fc59730c
--- /dev/null
+++ b/vendor/github.com/dyatlov/go-opengraph/opengraph/types/video/video.go
@@ -0,0 +1,83 @@
+package video
+
+import (
+ "time"
+
+ "github.com/dyatlov/go-opengraph/opengraph/types/actor"
+)
+
+// Video defines Open Graph Video type
+type Video struct {
+ URL string `json:"url"`
+ SecureURL string `json:"secure_url"`
+ Type string `json:"type"`
+ Width uint64 `json:"width"`
+ Height uint64 `json:"height"`
+ Actors []*actor.Actor `json:"actors,omitempty"`
+ Directors []string `json:"directors,omitempty"`
+ Writers []string `json:"writers,omitempty"`
+ Duration uint64 `json:"duration,omitempty"`
+ ReleaseDate *time.Time `json:"release_date,omitempty"`
+ Tags []string `json:"tags,omitempty"`
+}
+
+func NewVideo() *Video {
+ return &Video{}
+}
+
+func ensureHasVideo(videos []*Video) []*Video {
+ if len(videos) == 0 {
+ videos = append(videos, NewVideo())
+ }
+ return videos
+}
+
+func AddURL(videos []*Video, v string) []*Video {
+ if len(videos) == 0 || (videos[len(videos)-1].URL != "" && videos[len(videos)-1].URL != v) {
+ videos = append(videos, NewVideo())
+ }
+ videos[len(videos)-1].URL = v
+ return videos
+}
+
+func AddTag(videos []*Video, v string) []*Video {
+ videos = ensureHasVideo(videos)
+ videos[len(videos)-1].Tags = append(videos[len(videos)-1].Tags, v)
+ return videos
+}
+
+func AddDuration(videos []*Video, v uint64) []*Video {
+ videos = ensureHasVideo(videos)
+ videos[len(videos)-1].Duration = v
+ return videos
+}
+
+func AddReleaseDate(videos []*Video, v *time.Time) []*Video {
+ videos = ensureHasVideo(videos)
+ videos[len(videos)-1].ReleaseDate = v
+ return videos
+}
+
+func AddSecureURL(videos []*Video, v string) []*Video {
+ videos = ensureHasVideo(videos)
+ videos[len(videos)-1].SecureURL = v
+ return videos
+}
+
+func AddType(videos []*Video, v string) []*Video {
+ videos = ensureHasVideo(videos)
+ videos[len(videos)-1].Type = v
+ return videos
+}
+
+func AddWidth(videos []*Video, v uint64) []*Video {
+ videos = ensureHasVideo(videos)
+ videos[len(videos)-1].Width = v
+ return videos
+}
+
+func AddHeight(videos []*Video, v uint64) []*Video {
+ videos = ensureHasVideo(videos)
+ videos[len(videos)-1].Height = v
+ return videos
+}
diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md
new file mode 100644
index 00000000..25fdaf63
--- /dev/null
+++ b/vendor/github.com/fatih/color/LICENSE.md
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Fatih Arslan
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md
new file mode 100644
index 00000000..be82827c
--- /dev/null
+++ b/vendor/github.com/fatih/color/README.md
@@ -0,0 +1,176 @@
+# color [![](https://github.com/fatih/color/workflows/build/badge.svg)](https://github.com/fatih/color/actions) [![PkgGoDev](https://pkg.go.dev/badge/github.com/fatih/color)](https://pkg.go.dev/github.com/fatih/color)
+
+Color lets you use colorized outputs in terms of [ANSI Escape
+Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It
+has support for Windows too! The API can be used in several ways; pick one that
+suits you.
+
+![Color](https://user-images.githubusercontent.com/438920/96832689-03b3e000-13f4-11eb-9803-46f4c4de3406.jpg)
+
+## Install
+
+```bash
+go get github.com/fatih/color
+```
+
+## Examples
+
+### Standard colors
+
+```go
+// Print with default helper functions
+color.Cyan("Prints text in cyan.")
+
+// A newline will be appended automatically
+color.Blue("Prints %s in blue.", "text")
+
+// These are using the default foreground colors
+color.Red("We have red")
+color.Magenta("And many others ..")
+
+```
+
+### Mix and reuse colors
+
+```go
+// Create a new color object
+c := color.New(color.FgCyan).Add(color.Underline)
+c.Println("Prints cyan text with an underline.")
+
+// Or just add them to New()
+d := color.New(color.FgCyan, color.Bold)
+d.Printf("This prints bold cyan %s\n", "too!.")
+
+// Mix up foreground and background colors, create new mixes!
+red := color.New(color.FgRed)
+
+boldRed := red.Add(color.Bold)
+boldRed.Println("This will print text in bold red.")
+
+whiteBackground := red.Add(color.BgWhite)
+whiteBackground.Println("Red text with white background.")
+```
+
+### Use your own output (io.Writer)
+
+```go
+// Use your own io.Writer output
+color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
+
+blue := color.New(color.FgBlue)
+blue.Fprint(writer, "This will print text in blue.")
+```
+
+### Custom print functions (PrintFunc)
+
+```go
+// Create a custom print function for convenience
+red := color.New(color.FgRed).PrintfFunc()
+red("Warning")
+red("Error: %s", err)
+
+// Mix up multiple attributes
+notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
+notice("Don't forget this...")
+```
+
+### Custom fprint functions (FprintFunc)
+
+```go
+blue := color.New(color.FgBlue).FprintfFunc()
+blue(myWriter, "important notice: %s", stars)
+
+// Mix up with multiple attributes
+success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
+success(myWriter, "Don't forget this...")
+```
+
+### Insert into noncolor strings (SprintFunc)
+
+```go
+// Create SprintXxx functions to mix strings with other non-colorized strings:
+yellow := color.New(color.FgYellow).SprintFunc()
+red := color.New(color.FgRed).SprintFunc()
+fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error"))
+
+info := color.New(color.FgWhite, color.BgGreen).SprintFunc()
+fmt.Printf("This %s rocks!\n", info("package"))
+
+// Use helper functions
+fmt.Println("This", color.RedString("warning"), "should be not neglected.")
+fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.")
+
+// Windows supported too! Just don't forget to change the output to color.Output
+fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
+```
+
+### Plug into existing code
+
+```go
+// Use handy standard colors
+color.Set(color.FgYellow)
+
+fmt.Println("Existing text will now be in yellow")
+fmt.Printf("This one %s\n", "too")
+
+color.Unset() // Don't forget to unset
+
+// You can mix up parameters
+color.Set(color.FgMagenta, color.Bold)
+defer color.Unset() // Use it in your function
+
+fmt.Println("All text will now be bold magenta.")
+```
+
+### Disable/Enable color
+
+There might be a case where you want to explicitly disable/enable color output. The
+`go-isatty` package will automatically disable color output for non-tty output streams
+(for example, if the output is piped directly to `less`).
+
+The `color` package also disables color output if the [`NO_COLOR`](https://no-color.org) environment
+variable is set to a non-empty string.
+
+`Color` has support to disable/enable colors programmatically both globally and
+for single color definitions. For example, suppose you have a CLI app and a
+`-no-color` bool flag. You can easily disable the color output with:
+
+```go
+var flagNoColor = flag.Bool("no-color", false, "Disable color output")
+
+if *flagNoColor {
+ color.NoColor = true // disables colorized output
+}
+```
+
+It also has support for single color definitions (local). You can
+disable/enable color output on the fly:
+
+```go
+c := color.New(color.FgCyan)
+c.Println("Prints cyan text")
+
+c.DisableColor()
+c.Println("This is printed without any color")
+
+c.EnableColor()
+c.Println("This prints again cyan...")
+```
+
+## GitHub Actions
+
+To output color in GitHub Actions (or other CI systems that support ANSI colors), make sure to set `color.NoColor = false` so that it bypasses the check for non-tty output streams.
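+
+For example, a minimal sketch (the check for the conventional `CI` environment
+variable is an assumption; adjust it for your own CI system):
+
+```go
+if os.Getenv("CI") != "" {
+	color.NoColor = false // force colors even though stdout is not a tty
+}
+color.Green("build passed")
+```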
+
+## Todo
+
+* Save/Return previous values
+* Evaluate fmt.Formatter interface
+
+## Credits
+
+* [Fatih Arslan](https://github.com/fatih)
+* Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable)
+
+## License
+
+The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details
diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go
new file mode 100644
index 00000000..c4234287
--- /dev/null
+++ b/vendor/github.com/fatih/color/color.go
@@ -0,0 +1,650 @@
+package color
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/mattn/go-colorable"
+ "github.com/mattn/go-isatty"
+)
+
+var (
+ // NoColor defines if the output is colorized or not. It's dynamically set to
+ // false or true based on the stdout's file descriptor referring to a terminal
+ // or not. It's also set to true if the NO_COLOR environment variable is
+ // set to a non-empty string. This is a global option and affects all
+ // colors. For more control over individual color blocks, use the
+ // DisableColor() method on each Color object.
+ NoColor = noColorIsSet() || os.Getenv("TERM") == "dumb" ||
+ (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()))
+
+ // Output defines the standard output of the print functions. By default,
+ // os.Stdout is used.
+ Output = colorable.NewColorableStdout()
+
+ // Error defines a color supporting writer for os.Stderr.
+ Error = colorable.NewColorableStderr()
+
+ // colorsCache is used to reduce the count of created Color objects and
+ // allows reusing already created objects with the required Attribute.
+ colorsCache = make(map[Attribute]*Color)
+ colorsCacheMu sync.Mutex // protects colorsCache
+)
+
+// noColorIsSet returns true if the environment variable NO_COLOR is set to a non-empty string.
+func noColorIsSet() bool {
+ return os.Getenv("NO_COLOR") != ""
+}
+
+// Color defines a custom color object which is defined by SGR parameters.
+type Color struct {
+ params []Attribute
+ noColor *bool
+}
+
+// Attribute defines a single SGR Code
+type Attribute int
+
+const escape = "\x1b"
+
+// Base attributes
+const (
+ Reset Attribute = iota
+ Bold
+ Faint
+ Italic
+ Underline
+ BlinkSlow
+ BlinkRapid
+ ReverseVideo
+ Concealed
+ CrossedOut
+)
+
+const (
+ ResetBold Attribute = iota + 22
+ ResetItalic
+ ResetUnderline
+ ResetBlinking
+ _
+ ResetReversed
+ ResetConcealed
+ ResetCrossedOut
+)
+
+var mapResetAttributes map[Attribute]Attribute = map[Attribute]Attribute{
+ Bold: ResetBold,
+ Faint: ResetBold,
+ Italic: ResetItalic,
+ Underline: ResetUnderline,
+ BlinkSlow: ResetBlinking,
+ BlinkRapid: ResetBlinking,
+ ReverseVideo: ResetReversed,
+ Concealed: ResetConcealed,
+ CrossedOut: ResetCrossedOut,
+}
+
+// Foreground text colors
+const (
+ FgBlack Attribute = iota + 30
+ FgRed
+ FgGreen
+ FgYellow
+ FgBlue
+ FgMagenta
+ FgCyan
+ FgWhite
+)
+
+// Foreground Hi-Intensity text colors
+const (
+ FgHiBlack Attribute = iota + 90
+ FgHiRed
+ FgHiGreen
+ FgHiYellow
+ FgHiBlue
+ FgHiMagenta
+ FgHiCyan
+ FgHiWhite
+)
+
+// Background text colors
+const (
+ BgBlack Attribute = iota + 40
+ BgRed
+ BgGreen
+ BgYellow
+ BgBlue
+ BgMagenta
+ BgCyan
+ BgWhite
+)
+
+// Background Hi-Intensity text colors
+const (
+ BgHiBlack Attribute = iota + 100
+ BgHiRed
+ BgHiGreen
+ BgHiYellow
+ BgHiBlue
+ BgHiMagenta
+ BgHiCyan
+ BgHiWhite
+)
+
+// New returns a newly created color object.
+func New(value ...Attribute) *Color {
+ c := &Color{
+ params: make([]Attribute, 0),
+ }
+
+ if noColorIsSet() {
+ c.noColor = boolPtr(true)
+ }
+
+ c.Add(value...)
+ return c
+}
+
+// Set sets the given parameters immediately. It will change the color of
+// output with the given SGR parameters until color.Unset() is called.
+func Set(p ...Attribute) *Color {
+ c := New(p...)
+ c.Set()
+ return c
+}
+
+// Unset resets all escape attributes and clears the output. Usually should
+// be called after Set().
+func Unset() {
+ if NoColor {
+ return
+ }
+
+ fmt.Fprintf(Output, "%s[%dm", escape, Reset)
+}
+
+// Set sets the SGR sequence.
+func (c *Color) Set() *Color {
+ if c.isNoColorSet() {
+ return c
+ }
+
+ fmt.Fprint(Output, c.format())
+ return c
+}
+
+func (c *Color) unset() {
+ if c.isNoColorSet() {
+ return
+ }
+
+ Unset()
+}
+
+// SetWriter is used to set the SGR sequence with the given io.Writer. This is
+// a low-level function, and users should use the higher-level functions, such
+// as color.Fprint, color.Print, etc.
+func (c *Color) SetWriter(w io.Writer) *Color {
+ if c.isNoColorSet() {
+ return c
+ }
+
+ fmt.Fprint(w, c.format())
+ return c
+}
+
+// UnsetWriter resets all escape attributes and clears the output with the given
+// io.Writer. Usually should be called after SetWriter().
+func (c *Color) UnsetWriter(w io.Writer) {
+ if c.isNoColorSet() {
+ return
+ }
+
+ if NoColor {
+ return
+ }
+
+ fmt.Fprintf(w, "%s[%dm", escape, Reset)
+}
+
+// Add is used to chain SGR parameters. Use as many parameters as needed to
+// combine and create custom color objects. Example: Add(color.FgRed, color.Underline).
+func (c *Color) Add(value ...Attribute) *Color {
+ c.params = append(c.params, value...)
+ return c
+}
+
+// Fprint formats using the default formats for its operands and writes to w.
+// Spaces are added between operands when neither is a string.
+// It returns the number of bytes written and any write error encountered.
+// On Windows, users should wrap w with colorable.NewColorable() if w is of
+// type *os.File.
+func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ c.SetWriter(w)
+ defer c.UnsetWriter(w)
+
+ return fmt.Fprint(w, a...)
+}
+
+// Print formats using the default formats for its operands and writes to
+// standard output. Spaces are added between operands when neither is a
+// string. It returns the number of bytes written and any write error
+// encountered. This is the standard fmt.Print() method wrapped with the given
+// color.
+func (c *Color) Print(a ...interface{}) (n int, err error) {
+ c.Set()
+ defer c.unset()
+
+ return fmt.Fprint(Output, a...)
+}
+
+// Fprintf formats according to a format specifier and writes to w.
+// It returns the number of bytes written and any write error encountered.
+// On Windows, users should wrap w with colorable.NewColorable() if w is of
+// type *os.File.
+func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ c.SetWriter(w)
+ defer c.UnsetWriter(w)
+
+ return fmt.Fprintf(w, format, a...)
+}
+
+// Printf formats according to a format specifier and writes to standard output.
+// It returns the number of bytes written and any write error encountered.
+// This is the standard fmt.Printf() method wrapped with the given color.
+func (c *Color) Printf(format string, a ...interface{}) (n int, err error) {
+ c.Set()
+ defer c.unset()
+
+ return fmt.Fprintf(Output, format, a...)
+}
+
+// Fprintln formats using the default formats for its operands and writes to w.
+// Spaces are always added between operands and a newline is appended.
+// On Windows, users should wrap w with colorable.NewColorable() if w is of
+// type *os.File.
+func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, c.wrap(fmt.Sprint(a...)))
+}
+
+// Println formats using the default formats for its operands and writes to
+// standard output. Spaces are always added between operands and a newline is
+// appended. It returns the number of bytes written and any write error
+// encountered. This is the standard fmt.Print() method wrapped with the given
+// color.
+func (c *Color) Println(a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(Output, c.wrap(fmt.Sprint(a...)))
+}
+
+// Sprint is just like Print, but returns a string instead of printing it.
+func (c *Color) Sprint(a ...interface{}) string {
+ return c.wrap(fmt.Sprint(a...))
+}
+
+// Sprintln is just like Println, but returns a string instead of printing it.
+func (c *Color) Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(c.Sprint(a...))
+}
+
+// Sprintf is just like Printf, but returns a string instead of printing it.
+func (c *Color) Sprintf(format string, a ...interface{}) string {
+ return c.wrap(fmt.Sprintf(format, a...))
+}
+
+// FprintFunc returns a new function that prints the passed arguments as
+// colorized with color.Fprint().
+func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) {
+ return func(w io.Writer, a ...interface{}) {
+ c.Fprint(w, a...)
+ }
+}
+
+// PrintFunc returns a new function that prints the passed arguments as
+// colorized with color.Print().
+func (c *Color) PrintFunc() func(a ...interface{}) {
+ return func(a ...interface{}) {
+ c.Print(a...)
+ }
+}
+
+// FprintfFunc returns a new function that prints the passed arguments as
+// colorized with color.Fprintf().
+func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) {
+ return func(w io.Writer, format string, a ...interface{}) {
+ c.Fprintf(w, format, a...)
+ }
+}
+
+// PrintfFunc returns a new function that prints the passed arguments as
+// colorized with color.Printf().
+func (c *Color) PrintfFunc() func(format string, a ...interface{}) {
+ return func(format string, a ...interface{}) {
+ c.Printf(format, a...)
+ }
+}
+
+// FprintlnFunc returns a new function that prints the passed arguments as
+// colorized with color.Fprintln().
+func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) {
+ return func(w io.Writer, a ...interface{}) {
+ c.Fprintln(w, a...)
+ }
+}
+
+// PrintlnFunc returns a new function that prints the passed arguments as
+// colorized with color.Println().
+func (c *Color) PrintlnFunc() func(a ...interface{}) {
+ return func(a ...interface{}) {
+ c.Println(a...)
+ }
+}
+
+// SprintFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprint(). Useful to put into or mix into other
+// strings. Windows users should use this in conjunction with color.Output, example:
+//
+// put := New(FgYellow).SprintFunc()
+// fmt.Fprintf(color.Output, "This is a %s", put("warning"))
+func (c *Color) SprintFunc() func(a ...interface{}) string {
+ return func(a ...interface{}) string {
+ return c.wrap(fmt.Sprint(a...))
+ }
+}
+
+// SprintfFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprintf(). Useful to put into or mix into other
+// strings. Windows users should use this in conjunction with color.Output.
+func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
+ return func(format string, a ...interface{}) string {
+ return c.wrap(fmt.Sprintf(format, a...))
+ }
+}
+
+// SprintlnFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprintln(). Useful to put into or mix into other
+// strings. Windows users should use this in conjunction with color.Output.
+func (c *Color) SprintlnFunc() func(a ...interface{}) string {
+ return func(a ...interface{}) string {
+ return fmt.Sprintln(c.Sprint(a...))
+ }
+}
+
+// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m".
+// An example output might be: "1;36" -> bold cyan
+func (c *Color) sequence() string {
+ format := make([]string, len(c.params))
+ for i, v := range c.params {
+ format[i] = strconv.Itoa(int(v))
+ }
+
+ return strings.Join(format, ";")
+}
+
+// wrap wraps the s string with the colors attributes. The string is ready to
+// be printed.
+func (c *Color) wrap(s string) string {
+ if c.isNoColorSet() {
+ return s
+ }
+
+ return c.format() + s + c.unformat()
+}
+
+func (c *Color) format() string {
+ return fmt.Sprintf("%s[%sm", escape, c.sequence())
+}
+
+func (c *Color) unformat() string {
+ //return fmt.Sprintf("%s[%dm", escape, Reset)
+ // for each element in the sequence use the specific reset escape, or the generic one if not found
+ format := make([]string, len(c.params))
+ for i, v := range c.params {
+ format[i] = strconv.Itoa(int(Reset))
+ ra, ok := mapResetAttributes[v]
+ if ok {
+ format[i] = strconv.Itoa(int(ra))
+ }
+ }
+
+ return fmt.Sprintf("%s[%sm", escape, strings.Join(format, ";"))
+}
+
+// DisableColor disables the color output. Useful for not changing any existing
+// code while still being able to suppress colored output. Can be used for flags
+// like "--no-color". To enable it again, use the EnableColor() method.
+func (c *Color) DisableColor() {
+ c.noColor = boolPtr(true)
+}
+
+// EnableColor enables the color output. Use it in conjunction with
+// DisableColor(). Otherwise, this method has no side effects.
+func (c *Color) EnableColor() {
+ c.noColor = boolPtr(false)
+}
+
+func (c *Color) isNoColorSet() bool {
+ // check first if we have user set action
+ if c.noColor != nil {
+ return *c.noColor
+ }
+
+ // if not return the global option, which is disabled by default
+ return NoColor
+}
+
+// Equals returns a boolean value indicating whether two colors are equal.
+func (c *Color) Equals(c2 *Color) bool {
+ if c == nil && c2 == nil {
+ return true
+ }
+ if c == nil || c2 == nil {
+ return false
+ }
+ if len(c.params) != len(c2.params) {
+ return false
+ }
+
+ for _, attr := range c.params {
+ if !c2.attrExists(attr) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (c *Color) attrExists(a Attribute) bool {
+ for _, attr := range c.params {
+ if attr == a {
+ return true
+ }
+ }
+
+ return false
+}
+
+func boolPtr(v bool) *bool {
+ return &v
+}
+
+func getCachedColor(p Attribute) *Color {
+ colorsCacheMu.Lock()
+ defer colorsCacheMu.Unlock()
+
+ c, ok := colorsCache[p]
+ if !ok {
+ c = New(p)
+ colorsCache[p] = c
+ }
+
+ return c
+}
+
+func colorPrint(format string, p Attribute, a ...interface{}) {
+ c := getCachedColor(p)
+
+ if !strings.HasSuffix(format, "\n") {
+ format += "\n"
+ }
+
+ if len(a) == 0 {
+ c.Print(format)
+ } else {
+ c.Printf(format, a...)
+ }
+}
+
+func colorString(format string, p Attribute, a ...interface{}) string {
+ c := getCachedColor(p)
+
+ if len(a) == 0 {
+ return c.SprintFunc()(format)
+ }
+
+ return c.SprintfFunc()(format, a...)
+}
+
+// Black is a convenient helper function to print with black foreground. A
+// newline is appended to format by default.
+func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) }
+
+// Red is a convenient helper function to print with red foreground. A
+// newline is appended to format by default.
+func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) }
+
+// Green is a convenient helper function to print with green foreground. A
+// newline is appended to format by default.
+func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) }
+
+// Yellow is a convenient helper function to print with yellow foreground.
+// A newline is appended to format by default.
+func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) }
+
+// Blue is a convenient helper function to print with blue foreground. A
+// newline is appended to format by default.
+func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) }
+
+// Magenta is a convenient helper function to print with magenta foreground.
+// A newline is appended to format by default.
+func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) }
+
+// Cyan is a convenient helper function to print with cyan foreground. A
+// newline is appended to format by default.
+func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) }
+
+// White is a convenient helper function to print with white foreground. A
+// newline is appended to format by default.
+func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) }
+
+// BlackString is a convenient helper function to return a string with black
+// foreground.
+func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) }
+
+// RedString is a convenient helper function to return a string with red
+// foreground.
+func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) }
+
+// GreenString is a convenient helper function to return a string with green
+// foreground.
+func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) }
+
+// YellowString is a convenient helper function to return a string with yellow
+// foreground.
+func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) }
+
+// BlueString is a convenient helper function to return a string with blue
+// foreground.
+func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) }
+
+// MagentaString is a convenient helper function to return a string with magenta
+// foreground.
+func MagentaString(format string, a ...interface{}) string {
+ return colorString(format, FgMagenta, a...)
+}
+
+// CyanString is a convenient helper function to return a string with cyan
+// foreground.
+func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) }
+
+// WhiteString is a convenient helper function to return a string with white
+// foreground.
+func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) }
+
+// HiBlack is a convenient helper function to print with hi-intensity black foreground. A
+// newline is appended to format by default.
+func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) }
+
+// HiRed is a convenient helper function to print with hi-intensity red foreground. A
+// newline is appended to format by default.
+func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) }
+
+// HiGreen is a convenient helper function to print with hi-intensity green foreground. A
+// newline is appended to format by default.
+func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) }
+
+// HiYellow is a convenient helper function to print with hi-intensity yellow foreground.
+// A newline is appended to format by default.
+func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) }
+
+// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A
+// newline is appended to format by default.
+func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) }
+
+// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground.
+// A newline is appended to format by default.
+func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) }
+
+// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A
+// newline is appended to format by default.
+func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) }
+
+// HiWhite is a convenient helper function to print with hi-intensity white foreground. A
+// newline is appended to format by default.
+func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) }
+
+// HiBlackString is a convenient helper function to return a string with hi-intensity black
+// foreground.
+func HiBlackString(format string, a ...interface{}) string {
+ return colorString(format, FgHiBlack, a...)
+}
+
+// HiRedString is a convenient helper function to return a string with hi-intensity red
+// foreground.
+func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) }
+
+// HiGreenString is a convenient helper function to return a string with hi-intensity green
+// foreground.
+func HiGreenString(format string, a ...interface{}) string {
+ return colorString(format, FgHiGreen, a...)
+}
+
+// HiYellowString is a convenient helper function to return a string with hi-intensity yellow
+// foreground.
+func HiYellowString(format string, a ...interface{}) string {
+ return colorString(format, FgHiYellow, a...)
+}
+
+// HiBlueString is a convenient helper function to return a string with hi-intensity blue
+// foreground.
+func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) }
+
+// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta
+// foreground.
+func HiMagentaString(format string, a ...interface{}) string {
+ return colorString(format, FgHiMagenta, a...)
+}
+
+// HiCyanString is a convenient helper function to return a string with hi-intensity cyan
+// foreground.
+func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) }
+
+// HiWhiteString is a convenient helper function to return a string with hi-intensity white
+// foreground.
+func HiWhiteString(format string, a ...interface{}) string {
+ return colorString(format, FgHiWhite, a...)
+}
diff --git a/vendor/github.com/fatih/color/color_windows.go b/vendor/github.com/fatih/color/color_windows.go
new file mode 100644
index 00000000..be01c558
--- /dev/null
+++ b/vendor/github.com/fatih/color/color_windows.go
@@ -0,0 +1,19 @@
+package color
+
+import (
+ "os"
+
+ "golang.org/x/sys/windows"
+)
+
+func init() {
+ // Opt-in for ansi color support for current process.
+ // https://learn.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences#output-sequences
+ var outMode uint32
+ out := windows.Handle(os.Stdout.Fd())
+ if err := windows.GetConsoleMode(out, &outMode); err != nil {
+ return
+ }
+ outMode |= windows.ENABLE_PROCESSED_OUTPUT | windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
+ _ = windows.SetConsoleMode(out, outMode)
+}
diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go
new file mode 100644
index 00000000..9491ad54
--- /dev/null
+++ b/vendor/github.com/fatih/color/doc.go
@@ -0,0 +1,134 @@
+/*
+Package color is an ANSI color package to output colorized or SGR defined
+output to the standard output. The API can be used in several ways; pick one
+that suits you.
+
+Use simple and default helper functions with predefined foreground colors:
+
+ color.Cyan("Prints text in cyan.")
+
+ // a newline will be appended automatically
+ color.Blue("Prints %s in blue.", "text")
+
+ // More default foreground colors..
+ color.Red("We have red")
+ color.Yellow("Yellow color too!")
+ color.Magenta("And many others ..")
+
+ // Hi-intensity colors
+ color.HiGreen("Bright green color.")
+ color.HiBlack("Bright black means gray..")
+ color.HiWhite("Shiny white color!")
+
+However, there are times when custom color mixes are required. Below are some
+examples to create custom color objects and use the print functions of each
+separate color object.
+
+ // Create a new color object
+ c := color.New(color.FgCyan).Add(color.Underline)
+ c.Println("Prints cyan text with an underline.")
+
+ // Or just add them to New()
+ d := color.New(color.FgCyan, color.Bold)
+ d.Printf("This prints bold cyan %s\n", "too!.")
+
+
+ // Mix up foreground and background colors, create new mixes!
+ red := color.New(color.FgRed)
+
+ boldRed := red.Add(color.Bold)
+ boldRed.Println("This will print text in bold red.")
+
+ whiteBackground := red.Add(color.BgWhite)
+ whiteBackground.Println("Red text with White background.")
+
+ // Use your own io.Writer output
+ color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
+
+ blue := color.New(color.FgBlue)
+ blue.Fprint(myWriter, "This will print text in blue.")
+
+You can create PrintXxx functions to simplify even more:
+
+ // Create a custom print function for convenience
+ red := color.New(color.FgRed).PrintfFunc()
+ red("warning")
+ red("error: %s", err)
+
+ // Mix up multiple attributes
+ notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
+ notice("don't forget this...")
+
+You can also use FprintXxx functions to pass your own io.Writer:
+
+ blue := color.New(FgBlue).FprintfFunc()
+ blue(myWriter, "important notice: %s", stars)
+
+ // Mix up with multiple attributes
+ success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
+ success(myWriter, "don't forget this...")
+
+Or create SprintXxx functions to mix strings with other non-colorized strings:
+
+ yellow := New(FgYellow).SprintFunc()
+ red := New(FgRed).SprintFunc()
+
+ fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
+
+ info := New(FgWhite, BgGreen).SprintFunc()
+ fmt.Printf("this %s rocks!\n", info("package"))
+
+Windows support is enabled by default. All Print functions work as intended.
+However, for the color.SprintXXX functions only, the user should use fmt.FprintXXX
+and set the output to color.Output:
+
+ fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
+
+ info := New(FgWhite, BgGreen).SprintFunc()
+ fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
+
+Using it with existing code is possible. Just use the Set() method to set the
+standard output to the given parameters. That way a rewrite of existing code
+is not required.
+
+ // Use handy standard colors.
+ color.Set(color.FgYellow)
+
+ fmt.Println("Existing text will be now in Yellow")
+ fmt.Printf("This one %s\n", "too")
+
+ color.Unset() // don't forget to unset
+
+ // You can mix up parameters
+ color.Set(color.FgMagenta, color.Bold)
+ defer color.Unset() // use it in your function
+
+ fmt.Println("All text will be now bold magenta.")
+
+There might be a case where you want to disable color output (for example to
+pipe the standard output of your app to somewhere else). `Color` has support to
+disable colors both globally and for single color definitions. For example,
+suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
+the color output with:
+
+ var flagNoColor = flag.Bool("no-color", false, "Disable color output")
+
+ if *flagNoColor {
+ color.NoColor = true // disables colorized output
+ }
+
+You can also disable color output by setting the NO_COLOR environment variable to a non-empty value.
+
+It also has support for single color definitions (local). You can
+disable/enable color output on the fly:
+
+ c := color.New(color.FgCyan)
+ c.Println("Prints cyan text")
+
+ c.DisableColor()
+ c.Println("This is printed without any color")
+
+ c.EnableColor()
+ c.Println("This prints again cyan...")
+*/
+package color
diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml
new file mode 100644
index 00000000..ffc7b992
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml
@@ -0,0 +1,13 @@
+freebsd_task:
+ name: 'FreeBSD'
+ freebsd_instance:
+ image_family: freebsd-13-2
+ install_script:
+ - pkg update -f
+ - pkg install -y go
+ test_script:
+ # run tests as user "cirrus" instead of root
+ - pw useradd cirrus -m
+ - chown -R cirrus:cirrus .
+ - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
+ - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore
index 1d89d85c..391cc076 100644
--- a/vendor/github.com/fsnotify/fsnotify/.gitignore
+++ b/vendor/github.com/fsnotify/fsnotify/.gitignore
@@ -4,3 +4,4 @@
# Output of go build ./cmd/fsnotify
/fsnotify
+/fsnotify.exe
diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
index 77f9593b..e0e57575 100644
--- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
+++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
@@ -1,16 +1,87 @@
# Changelog
-All notable changes to this project will be documented in this file.
+Unreleased
+----------
+Nothing yet.
-The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
-and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+1.7.0 - 2023-10-22
+------------------
+This version of fsnotify needs Go 1.17.
-## [Unreleased]
+### Additions
-Nothing yet.
+- illumos: add FEN backend to support illumos and Solaris. ([#371])
+
+- all: add `NewBufferedWatcher()` to use a buffered channel, which can be useful
+ in cases where you can't control the kernel buffer and receive a large number
+ of events in bursts. ([#550], [#572])
+
+- all: add `AddWith()`, which is identical to `Add()` but allows passing
+ options. ([#521])
+
+- windows: allow setting the ReadDirectoryChangesW() buffer size with
+ `fsnotify.WithBufferSize()`; the default of 64K is the highest value that
+ works on all platforms and is enough for most purposes, but in some cases a
+  higher buffer is needed. ([#521])
+
+### Changes and fixes
+
+- inotify: remove watcher if a watched path is renamed ([#518])
+
+  After a rename the reported name wasn't updated, or was even an empty string.
+ Inotify doesn't provide any good facilities to update it, so just remove the
+ watcher. This is already how it worked on kqueue and FEN.
+
+ On Windows this does work, and remains working.
+
+- windows: don't listen for file attribute changes ([#520])
+
+ File attribute changes are sent as `FILE_ACTION_MODIFIED` by the Windows API,
+  with no way to see if they're a file write or attribute change, so they would
+  show up as an fsnotify.Write event. This is never useful, and could result in many
+ spurious Write events.
+
+- windows: return `ErrEventOverflow` if the buffer is full ([#525])
+
+ Before it would merely return "short read", making it hard to detect this
+ error.
+
+- kqueue: make sure events for all files are delivered properly when removing a
+ watched directory ([#526])
+
+ Previously they would get sent with `""` (empty string) or `"."` as the path
+ name.
+
+- kqueue: don't emit spurious Create events for symbolic links ([#524])
+
+ The link would get resolved but kqueue would "forget" it already saw the link
+  itself, resulting in a Create for every Write event for the directory.
+
+- all: return `ErrClosed` on `Add()` when the watcher is closed ([#516])
+
+- other: add `Watcher.Errors` and `Watcher.Events` to the no-op `Watcher` in
+ `backend_other.go`, making it easier to use on unsupported platforms such as
+ WASM, AIX, etc. ([#528])
+
+- other: use the `backend_other.go` no-op if the `appengine` build tag is set;
+ Google AppEngine forbids usage of the unsafe package so the inotify backend
+ won't compile there.
-## [1.6.0] - 2022-10-13
+[#371]: https://github.com/fsnotify/fsnotify/pull/371
+[#516]: https://github.com/fsnotify/fsnotify/pull/516
+[#518]: https://github.com/fsnotify/fsnotify/pull/518
+[#520]: https://github.com/fsnotify/fsnotify/pull/520
+[#521]: https://github.com/fsnotify/fsnotify/pull/521
+[#524]: https://github.com/fsnotify/fsnotify/pull/524
+[#525]: https://github.com/fsnotify/fsnotify/pull/525
+[#526]: https://github.com/fsnotify/fsnotify/pull/526
+[#528]: https://github.com/fsnotify/fsnotify/pull/528
+[#537]: https://github.com/fsnotify/fsnotify/pull/537
+[#550]: https://github.com/fsnotify/fsnotify/pull/550
+[#572]: https://github.com/fsnotify/fsnotify/pull/572
+
+1.6.0 - 2022-10-13
+------------------
This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1,
but not documented). It also increases the minimum Linux version to 2.6.32.
diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md
index d4e6080f..e480733d 100644
--- a/vendor/github.com/fsnotify/fsnotify/README.md
+++ b/vendor/github.com/fsnotify/fsnotify/README.md
@@ -1,29 +1,31 @@
fsnotify is a Go library to provide cross-platform filesystem notifications on
-Windows, Linux, macOS, and BSD systems.
+Windows, Linux, macOS, BSD, and illumos.
-Go 1.16 or newer is required; the full documentation is at
+Go 1.17 or newer is required; the full documentation is at
https://pkg.go.dev/github.com/fsnotify/fsnotify
-**It's best to read the documentation at pkg.go.dev, as it's pinned to the last
-released version, whereas this README is for the last development version which
-may include additions/changes.**
-
---
Platform support:
-| Adapter | OS | Status |
-| --------------------- | ---------------| -------------------------------------------------------------|
-| inotify | Linux 2.6.32+ | Supported |
-| kqueue | BSD, macOS | Supported |
-| ReadDirectoryChangesW | Windows | Supported |
-| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) |
-| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) |
-| fanotify | Linux 5.9+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) |
-| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) |
-| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) |
-
-Linux and macOS should include Android and iOS, but these are currently untested.
+| Backend | OS | Status |
+| :-------------------- | :--------- | :------------------------------------------------------------------------ |
+| inotify | Linux | Supported |
+| kqueue | BSD, macOS | Supported |
+| ReadDirectoryChangesW | Windows | Supported |
+| FEN | illumos | Supported |
+| fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) |
+| AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment |
+| FSEvents | macOS | [Needs support in x/sys/unix][fsevents] |
+| USN Journals | Windows | [Needs support in x/sys/windows][usn] |
+| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) |
+
+Linux and illumos should include Android and Solaris, but these are currently
+untested.
+
+[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120
+[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847
+[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129
Usage
-----
@@ -83,20 +85,23 @@ run with:
% go run ./cmd/fsnotify
+Further detailed documentation can be found in godoc:
+https://pkg.go.dev/github.com/fsnotify/fsnotify
+
FAQ
---
### Will a file still be watched when it's moved to another directory?
No, not unless you are watching the location it was moved to.
-### Are subdirectories watched too?
+### Are subdirectories watched?
No, you must add watches for any directory you want to watch (a recursive
watcher is on the roadmap: [#18]).
[#18]: https://github.com/fsnotify/fsnotify/issues/18
### Do I have to watch the Error and Event channels in a goroutine?
-As of now, yes (you can read both channels in the same goroutine using `select`,
-you don't need a separate goroutine for both channels; see the example).
+Yes. You can read both channels in the same goroutine using `select` (you don't
+need a separate goroutine for both channels; see the example).
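+
+A minimal, self-contained sketch of that pattern (watching `/tmp` is just an
+example path):
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/fsnotify/fsnotify"
+)
+
+func main() {
+	w, err := fsnotify.NewWatcher()
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer w.Close()
+
+	if err := w.Add("/tmp"); err != nil {
+		log.Fatal(err)
+	}
+
+	// One goroutine (here: main) reads both channels with select.
+	for {
+		select {
+		case event, ok := <-w.Events:
+			if !ok {
+				return
+			}
+			log.Println("event:", event)
+		case err, ok := <-w.Errors:
+			if !ok {
+				return
+			}
+			log.Println("error:", err)
+		}
+	}
+}
+```
+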
### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys?
fsnotify requires support from underlying OS to work. The current NFS and SMB
@@ -107,6 +112,32 @@ This could be fixed with a polling watcher ([#9]), but it's not yet implemented.
[#9]: https://github.com/fsnotify/fsnotify/issues/9
+### Why do I get many Chmod events?
+Some programs may generate a lot of attribute changes; for example Spotlight on
+macOS, anti-virus programs, backup applications, and some others are known to do
+this. As a rule, it's typically best to ignore Chmod events. They're often not
+useful, and tend to cause problems.
+
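+For example, a typical event loop might simply skip events that only carry
+Chmod (assuming `w` is an existing `*fsnotify.Watcher`):
+
+```go
+for event := range w.Events {
+	if event.Op == fsnotify.Chmod {
+		continue // ignore pure attribute changes
+	}
+	// handle the remaining events ...
+}
+```
+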
+Spotlight indexing on macOS can result in multiple events (see [#15]). A
+temporary workaround is to add your folder(s) to the *Spotlight Privacy
+settings* until we have a native FSEvents implementation (see [#11]).
+
+[#11]: https://github.com/fsnotify/fsnotify/issues/11
+[#15]: https://github.com/fsnotify/fsnotify/issues/15
+
+### Watching a file doesn't work well
+Watching individual files (rather than directories) is generally not recommended
+as many programs (especially editors) update files atomically: they write to a
+temporary file which is then moved to the destination, overwriting the original
+(or some variant thereof). The watcher on the original file is now lost, as that
+file no longer exists.
+
+The upshot of this is that a power failure or crash won't leave a half-written
+file.
+
+Watch the parent directory and use `Event.Name` to filter out files you're not
+interested in. There is an example of this in `cmd/fsnotify/file.go`.
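+
+For illustration, a sketch of that approach (the directory and the `app.log`
+file name are just examples, and error handling is elided):
+
+```go
+w, _ := fsnotify.NewWatcher()
+defer w.Close()
+
+// Watch the directory that contains the file, not the file itself.
+_ = w.Add("/var/log")
+
+for event := range w.Events {
+	if filepath.Base(event.Name) != "app.log" {
+		continue // some other file in the directory changed
+	}
+	if event.Has(fsnotify.Write) || event.Has(fsnotify.Create) {
+		log.Println("app.log changed:", event)
+	}
+}
+```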
+
Platform-specific notes
-----------------------
### Linux
@@ -151,11 +182,3 @@ these platforms.
The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to
control the maximum number of open files.
-
-### macOS
-Spotlight indexing on macOS can result in multiple events (see [#15]). A temporary
-workaround is to add your folder(s) to the *Spotlight Privacy settings* until we
-have a native FSEvents implementation (see [#11]).
-
-[#11]: https://github.com/fsnotify/fsnotify/issues/11
-[#15]: https://github.com/fsnotify/fsnotify/issues/15
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go
index 1a95ad8e..28497f1d 100644
--- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go
+++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go
@@ -1,10 +1,19 @@
//go:build solaris
// +build solaris
+// Note: the documentation on the Watcher type and methods is generated from
+// mkdoc.zsh
+
package fsnotify
import (
"errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "golang.org/x/sys/unix"
)
// Watcher watches a set of paths, delivering events on a channel.
@@ -17,9 +26,9 @@ import (
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
-// fp := os.Open("file")
-// os.Remove("file") // Triggers Chmod
-// fp.Close() // Triggers Remove
+// fp := os.Open("file")
+// os.Remove("file") // Triggers Chmod
+// fp.Close() // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
@@ -33,16 +42,16 @@ import (
//
// To increase them you can use sysctl or write the value to the /proc file:
//
-// # Default values on Linux 5.18
-// sysctl fs.inotify.max_user_watches=124983
-// sysctl fs.inotify.max_user_instances=128
+// # Default values on Linux 5.18
+// sysctl fs.inotify.max_user_watches=124983
+// sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
-// fs.inotify.max_user_watches=124983
-// fs.inotify.max_user_instances=128
+// fs.inotify.max_user_watches=124983
+// fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
@@ -58,14 +67,20 @@ import (
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
-// # macOS notes
+// # Windows notes
+//
+// Paths can be added as "C:\path\to\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
//
-// Spotlight indexing on macOS can result in multiple events (see [#15]). A
-// temporary workaround is to add your folder(s) to the "Spotlight Privacy
-// Settings" until we have a native FSEvents implementation (see [#11]).
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all files, sometimes it will send no
+// events, and often only for some files.
//
-// [#11]: https://github.com/fsnotify/fsnotify/issues/11
-// [#15]: https://github.com/fsnotify/fsnotify/issues/15
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems. If you have many
+// events in quick succession this may not be enough, and you will have to use
+// [WithBufferSize] to increase the value.
type Watcher struct {
// Events sends the filesystem change events.
//
@@ -92,44 +107,129 @@ type Watcher struct {
// initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program
- // you may get hundreds of Write events, so you
- // probably want to wait until you've stopped receiving
- // them (see the dedup example in cmd/fsnotify).
+ // you may get hundreds of Write events, and you may
+ // want to wait until you've stopped receiving them
+ // (see the dedup example in cmd/fsnotify).
+ //
+ // Some systems may send Write event for directories
+ // when the directory content changes.
//
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent
- // and on kqueue when a file is truncated. On Windows
- // it's never sent.
+ // when a file is truncated. On Windows it's never
+ // sent.
Events chan Event
// Errors sends any errors.
+ //
+ // ErrEventOverflow is used to indicate there are too many events:
+ //
+ // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
+ // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
+ // - kqueue, fen: Not used.
Errors chan error
+
+ mu sync.Mutex
+ port *unix.EventPort
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
+ dirs map[string]struct{} // Explicitly watched directories
+ watches map[string]struct{} // Explicitly watched non-directories
}
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
- return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
+ return NewBufferedWatcher(0)
}
-// Close removes all watches and closes the events channel.
+// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
+// channel.
+//
+// The main use case for this is situations with a very large number of events
+// where the kernel buffer size can't be increased (e.g. due to lack of
+// permissions). An unbuffered Watcher will perform better for almost all use
+// cases, and whenever possible you will be better off increasing the kernel
+// buffers instead of adding a large userspace buffer.
+func NewBufferedWatcher(sz uint) (*Watcher, error) {
+ w := &Watcher{
+ Events: make(chan Event, sz),
+ Errors: make(chan error),
+ dirs: make(map[string]struct{}),
+ watches: make(map[string]struct{}),
+ done: make(chan struct{}),
+ }
+
+ var err error
+ w.port, err = unix.NewEventPort()
+ if err != nil {
+ return nil, fmt.Errorf("fsnotify.NewWatcher: %w", err)
+ }
+
+ go w.readEvents()
+ return w, nil
+}
+
+// sendEvent attempts to send an event to the user, returning true if the event
+// was put in the channel successfully and false if the watcher has been closed.
+func (w *Watcher) sendEvent(name string, op Op) (sent bool) {
+ select {
+ case w.Events <- Event{Name: name, Op: op}:
+ return true
+ case <-w.done:
+ return false
+ }
+}
+
+// sendError attempts to send an error to the user, returning true if the error
+// was put in the channel successfully and false if the watcher has been closed.
+func (w *Watcher) sendError(err error) (sent bool) {
+ select {
+ case w.Errors <- err:
+ return true
+ case <-w.done:
+ return false
+ }
+}
+
+func (w *Watcher) isClosed() bool {
+ select {
+ case <-w.done:
+ return true
+ default:
+ return false
+ }
+}
+
+// Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error {
- return nil
+ // Take the lock used by associateFile to prevent lingering events from
+ // being processed after the close
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if w.isClosed() {
+ return nil
+ }
+ close(w.done)
+ return w.port.Close()
}
// Add starts monitoring the path for changes.
//
-// A path can only be watched once; attempting to watch it more than once will
-// return an error. Paths that do not yet exist on the filesystem cannot be
-// added. A watch will be automatically removed if the path is deleted.
+// A path can only be watched once; watching it more than once is a no-op and will
+// not return an error. Paths that do not yet exist on the filesystem cannot be
+// watched.
//
-// A path will remain watched if it gets renamed to somewhere else on the same
-// filesystem, but the monitor will get removed if the path gets deleted and
-// re-created, or if it's moved to a different filesystem.
+// A watch will be automatically removed if the watched path is deleted or
+// renamed. The exception is the Windows backend, which doesn't remove the
+// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
+// Returns [ErrClosed] if [Watcher.Close] was called.
+//
+// See [Watcher.AddWith] for a version that allows adding options.
+//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
@@ -139,15 +239,63 @@ func (w *Watcher) Close() error {
// # Watching files
//
// Watching individual files (rather than directories) is generally not
-// recommended as many tools update files atomically. Instead of "just" writing
-// to the file a temporary file will be written to first, and if successful the
-// temporary file is moved to to destination removing the original, or some
-// variant thereof. The watcher on the original file is now lost, as it no
-// longer exists.
-//
-// Instead, watch the parent directory and use Event.Name to filter out files
-// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
-func (w *Watcher) Add(name string) error {
+// recommended as many programs (especially editors) update files atomically: they
+// write to a temporary file which is then moved to the destination,
+// overwriting the original (or some variant thereof). The watcher on the
+// original file is now lost, as that no longer exists.
+//
+// The upshot of this is that a power failure or crash won't leave a
+// half-written file.
+//
+// Watch the parent directory and use Event.Name to filter out files you're not
+// interested in. There is an example of this in cmd/fsnotify/file.go.
+func (w *Watcher) Add(name string) error { return w.AddWith(name) }
+
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+// other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(name string, opts ...addOpt) error {
+ if w.isClosed() {
+ return ErrClosed
+ }
+ if w.port.PathIsWatched(name) {
+ return nil
+ }
+
+ _ = getOptions(opts...)
+
+ // Currently we resolve symlinks that were explicitly requested to be
+ // watched. Otherwise we would use LStat here.
+ stat, err := os.Stat(name)
+ if err != nil {
+ return err
+ }
+
+ // Associate all files in the directory.
+ if stat.IsDir() {
+ err := w.handleDirectory(name, stat, true, w.associateFile)
+ if err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ w.dirs[name] = struct{}{}
+ w.mu.Unlock()
+ return nil
+ }
+
+ err = w.associateFile(name, stat, true)
+ if err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ w.watches[name] = struct{}{}
+ w.mu.Unlock()
return nil
}
@@ -157,6 +305,336 @@ func (w *Watcher) Add(name string) error {
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+//
+// Returns nil if [Watcher.Close] was called.
func (w *Watcher) Remove(name string) error {
+ if w.isClosed() {
+ return nil
+ }
+ if !w.port.PathIsWatched(name) {
+ return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
+ }
+
+ // The user has expressed an intent. Immediately remove this name from
+ // whichever watch list it might be in. If it's not in there the delete
+ // doesn't cause harm.
+ w.mu.Lock()
+ delete(w.watches, name)
+ delete(w.dirs, name)
+ w.mu.Unlock()
+
+ stat, err := os.Stat(name)
+ if err != nil {
+ return err
+ }
+
+ // Remove associations for every file in the directory.
+ if stat.IsDir() {
+ err := w.handleDirectory(name, stat, false, w.dissociateFile)
+ if err != nil {
+ return err
+ }
+ return nil
+ }
+
+ err = w.port.DissociatePath(name)
+ if err != nil {
+ return err
+ }
+
return nil
}
+
+// readEvents contains the main loop that runs in a goroutine watching for events.
+func (w *Watcher) readEvents() {
+ // If this function returns, the watcher has been closed and we can close
+ // these channels
+ defer func() {
+ close(w.Errors)
+ close(w.Events)
+ }()
+
+ pevents := make([]unix.PortEvent, 8)
+ for {
+ count, err := w.port.Get(pevents, 1, nil)
+ if err != nil && err != unix.ETIME {
+ // Interrupted system call (count should be 0); ignore and continue
+ if errors.Is(err, unix.EINTR) && count == 0 {
+ continue
+ }
+ // Get failed because we called w.Close()
+ if errors.Is(err, unix.EBADF) && w.isClosed() {
+ return
+ }
+ // There was an error not caused by calling w.Close()
+ if !w.sendError(err) {
+ return
+ }
+ }
+
+ p := pevents[:count]
+ for _, pevent := range p {
+ if pevent.Source != unix.PORT_SOURCE_FILE {
+ // Event from unexpected source received; should never happen.
+ if !w.sendError(errors.New("Event from unexpected source received")) {
+ return
+ }
+ continue
+ }
+
+ err = w.handleEvent(&pevent)
+ if err != nil {
+ if !w.sendError(err) {
+ return
+ }
+ }
+ }
+ }
+}
+
+func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
+ files, err := os.ReadDir(path)
+ if err != nil {
+ return err
+ }
+
+ // Handle all children of the directory.
+ for _, entry := range files {
+ finfo, err := entry.Info()
+ if err != nil {
+ return err
+ }
+ err = handler(filepath.Join(path, finfo.Name()), finfo, false)
+ if err != nil {
+ return err
+ }
+ }
+
+ // And finally handle the directory itself.
+ return handler(path, stat, follow)
+}
+
+// handleEvent might need to emit more than one fsnotify event if the events
+// bitmap matches more than one event type (e.g. the file was both modified and
+// had the attributes changed between when the association was created and
+// when the event was returned)
+func (w *Watcher) handleEvent(event *unix.PortEvent) error {
+ var (
+ events = event.Events
+ path = event.Path
+ fmode = event.Cookie.(os.FileMode)
+ reRegister = true
+ )
+
+ w.mu.Lock()
+ _, watchedDir := w.dirs[path]
+ _, watchedPath := w.watches[path]
+ w.mu.Unlock()
+ isWatched := watchedDir || watchedPath
+
+ if events&unix.FILE_DELETE != 0 {
+ if !w.sendEvent(path, Remove) {
+ return nil
+ }
+ reRegister = false
+ }
+ if events&unix.FILE_RENAME_FROM != 0 {
+ if !w.sendEvent(path, Rename) {
+ return nil
+ }
+ // Don't keep watching the new file name
+ reRegister = false
+ }
+ if events&unix.FILE_RENAME_TO != 0 {
+ // We don't report a Rename event for this case, because Rename events
+ // are interpreted as referring to the _old_ name of the file, and in
+ // this case the event would refer to the new name of the file. This
+ // type of rename event is not supported by fsnotify.
+
+ // inotify reports a Remove event in this case, so we simulate this
+ // here.
+ if !w.sendEvent(path, Remove) {
+ return nil
+ }
+ // Don't keep watching the file that was removed
+ reRegister = false
+ }
+
+ // The file is gone, nothing left to do.
+ if !reRegister {
+ if watchedDir {
+ w.mu.Lock()
+ delete(w.dirs, path)
+ w.mu.Unlock()
+ }
+ if watchedPath {
+ w.mu.Lock()
+ delete(w.watches, path)
+ w.mu.Unlock()
+ }
+ return nil
+ }
+
+ // If we didn't get a deletion the file still exists and we're going to have
+ // to watch it again. Let's Stat it now so that we can compare permissions
+ // and have what we need to continue watching the file
+
+ stat, err := os.Lstat(path)
+ if err != nil {
+ // This is unexpected, but we should still emit an event. This happens
+ // most often on "rm -r" of a subdirectory inside a watched directory. We
+ // get a modify event of something happening inside, but by the time we
+ // get here, the subdirectory is already gone. Clearly we were watching
+ // this path but now it is gone. Let's tell the user that it was
+ // removed.
+ if !w.sendEvent(path, Remove) {
+ return nil
+ }
+ // Suppress extra write events on removed directories; they are not
+ // informative and can be confusing.
+ return nil
+ }
+
+ // Resolve symlinks that were explicitly watched, as we would have at Add()
+ // time. This helps suppress spurious Chmod events on watched symlinks.
+ if isWatched {
+ stat, err = os.Stat(path)
+ if err != nil {
+ // The symlink still exists, but the target is gone. Report the
+ // Remove similar to above.
+ if !w.sendEvent(path, Remove) {
+ return nil
+ }
+ // Don't return the error
+ }
+ }
+
+ if events&unix.FILE_MODIFIED != 0 {
+ if fmode.IsDir() {
+ if watchedDir {
+ if err := w.updateDirectory(path); err != nil {
+ return err
+ }
+ } else {
+ if !w.sendEvent(path, Write) {
+ return nil
+ }
+ }
+ } else {
+ if !w.sendEvent(path, Write) {
+ return nil
+ }
+ }
+ }
+ if events&unix.FILE_ATTRIB != 0 && stat != nil {
+ // Only send Chmod if perms changed
+ if stat.Mode().Perm() != fmode.Perm() {
+ if !w.sendEvent(path, Chmod) {
+ return nil
+ }
+ }
+ }
+
+ if stat != nil {
+ // If we get here, it means we've hit an event above that requires us to
+ // continue watching the file or directory
+ return w.associateFile(path, stat, isWatched)
+ }
+ return nil
+}
+
+func (w *Watcher) updateDirectory(path string) error {
+ // The directory was modified, so we must find unwatched entities and watch
+ // them. If something was removed from the directory, nothing will happen,
+ // as everything else should still be watched.
+ files, err := os.ReadDir(path)
+ if err != nil {
+ return err
+ }
+
+ for _, entry := range files {
+ path := filepath.Join(path, entry.Name())
+ if w.port.PathIsWatched(path) {
+ continue
+ }
+
+ finfo, err := entry.Info()
+ if err != nil {
+ return err
+ }
+ err = w.associateFile(path, finfo, false)
+ if err != nil {
+ if !w.sendError(err) {
+ return nil
+ }
+ }
+ if !w.sendEvent(path, Create) {
+ return nil
+ }
+ }
+ return nil
+}
+
+func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error {
+ if w.isClosed() {
+ return ErrClosed
+ }
+ // This is primarily protecting the call to AssociatePath but it is
+ // important and intentional that the call to PathIsWatched is also
+ // protected by this mutex. Without this mutex, AssociatePath has been seen
+ // to error out that the path is already associated.
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if w.port.PathIsWatched(path) {
+ // Remove the old association in favor of this one. If we get ENOENT,
+ // then while the x/sys/unix wrapper still thought that this path was
+ // associated, the underlying event port did not. This call will have
+ // cleared up that discrepancy. The most likely cause is that the event
+ // has fired but we haven't processed it yet.
+ err := w.port.DissociatePath(path)
+ if err != nil && err != unix.ENOENT {
+ return err
+ }
+ }
+ // FILE_NOFOLLOW means we watch symlinks themselves rather than their
+ // targets.
+ events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW
+ if follow {
+ // We *DO* follow symlinks for explicitly watched entries.
+ events = unix.FILE_MODIFIED | unix.FILE_ATTRIB
+ }
+ return w.port.AssociatePath(path, stat,
+ events,
+ stat.Mode())
+}
+
+func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error {
+ if !w.port.PathIsWatched(path) {
+ return nil
+ }
+ return w.port.DissociatePath(path)
+}
+
+// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
+// yet removed).
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) WatchList() []string {
+ if w.isClosed() {
+ return nil
+ }
+
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ entries := make([]string, 0, len(w.watches)+len(w.dirs))
+ for pathname := range w.dirs {
+ entries = append(entries, pathname)
+ }
+ for pathname := range w.watches {
+ entries = append(entries, pathname)
+ }
+
+ return entries
+}
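
The FEN backend above plugs Solaris/illumos event ports into the same public fsnotify surface the other backends expose (NewWatcher, Add, and the Events/Errors channels). As a reminder of how that surface is consumed, a minimal watch loop looks roughly like the sketch below; the watched path and the logging are illustrative assumptions, not part of this diff.

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// NewWatcher picks the platform backend; on illumos/Solaris that is now
	// the event-ports (FEN) implementation added above.
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Watching a directory monitors all files inside it.
	if err := w.Add("/tmp/watched"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok { // Events is closed once Close() has been called.
				return
			}
			log.Println("event:", ev.Op, ev.Name)
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
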
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
index 54c77fbb..921c1c1e 100644
--- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
+++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
@@ -1,5 +1,8 @@
-//go:build linux
-// +build linux
+//go:build linux && !appengine
+// +build linux,!appengine
+
+// Note: the documentation on the Watcher type and methods is generated from
+// mkdoc.zsh
package fsnotify
@@ -26,9 +29,9 @@ import (
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
-// fp := os.Open("file")
-// os.Remove("file") // Triggers Chmod
-// fp.Close() // Triggers Remove
+// fp := os.Open("file")
+// os.Remove("file") // Triggers Chmod
+// fp.Close() // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
@@ -42,16 +45,16 @@ import (
//
// To increase them you can use sysctl or write the value to the /proc file:
//
-// # Default values on Linux 5.18
-// sysctl fs.inotify.max_user_watches=124983
-// sysctl fs.inotify.max_user_instances=128
+// # Default values on Linux 5.18
+// sysctl fs.inotify.max_user_watches=124983
+// sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
-// fs.inotify.max_user_watches=124983
-// fs.inotify.max_user_instances=128
+// fs.inotify.max_user_watches=124983
+// fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
@@ -67,14 +70,20 @@ import (
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
-// # macOS notes
+// # Windows notes
+//
+// Paths can be added as "C:\path\to\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
//
-// Spotlight indexing on macOS can result in multiple events (see [#15]). A
-// temporary workaround is to add your folder(s) to the "Spotlight Privacy
-// Settings" until we have a native FSEvents implementation (see [#11]).
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all files, sometimes it will send no
+// events, and often only for some files.
//
-// [#11]: https://github.com/fsnotify/fsnotify/issues/11
-// [#15]: https://github.com/fsnotify/fsnotify/issues/15
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems. If you have many
+// events in quick succession this may not be enough, and you will have to use
+// [WithBufferSize] to increase the value.
type Watcher struct {
// Events sends the filesystem change events.
//
@@ -101,36 +110,148 @@ type Watcher struct {
// initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program
- // you may get hundreds of Write events, so you
- // probably want to wait until you've stopped receiving
- // them (see the dedup example in cmd/fsnotify).
+ // you may get hundreds of Write events, and you may
+ // want to wait until you've stopped receiving them
+ // (see the dedup example in cmd/fsnotify).
+ //
+ // Some systems may send Write events for directories
+ // when the directory content changes.
//
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent
- // and on kqueue when a file is truncated. On Windows
- // it's never sent.
+ // when a file is truncated. On Windows it's never
+ // sent.
Events chan Event
// Errors sends any errors.
+ //
+ // ErrEventOverflow is used to indicate there are too many events:
+ //
+ // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
+ // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
+ // - kqueue, fen: Not used.
Errors chan error
// Store fd here as os.File.Read() will no longer return on close after
// calling Fd(). See: https://github.com/golang/go/issues/26439
fd int
- mu sync.Mutex // Map access
inotifyFile *os.File
- watches map[string]*watch // Map of inotify watches (key: path)
- paths map[int]string // Map of watched paths (key: watch descriptor)
- done chan struct{} // Channel for sending a "quit message" to the reader goroutine
- doneResp chan struct{} // Channel to respond to Close
+ watches *watches
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
+ closeMu sync.Mutex
+ doneResp chan struct{} // Channel to respond to Close
+}
+
+type (
+ watches struct {
+ mu sync.RWMutex
+ wd map[uint32]*watch // wd → watch
+ path map[string]uint32 // pathname → wd
+ }
+ watch struct {
+ wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+ flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+ path string // Watch path.
+ }
+)
+
+func newWatches() *watches {
+ return &watches{
+ wd: make(map[uint32]*watch),
+ path: make(map[string]uint32),
+ }
+}
+
+func (w *watches) len() int {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+ return len(w.wd)
+}
+
+func (w *watches) add(ww *watch) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ w.wd[ww.wd] = ww
+ w.path[ww.path] = ww.wd
+}
+
+func (w *watches) remove(wd uint32) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ delete(w.path, w.wd[wd].path)
+ delete(w.wd, wd)
+}
+
+func (w *watches) removePath(path string) (uint32, bool) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ wd, ok := w.path[path]
+ if !ok {
+ return 0, false
+ }
+
+ delete(w.path, path)
+ delete(w.wd, wd)
+
+ return wd, true
+}
+
+func (w *watches) byPath(path string) *watch {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+ return w.wd[w.path[path]]
+}
+
+func (w *watches) byWd(wd uint32) *watch {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+ return w.wd[wd]
+}
+
+func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ var existing *watch
+ wd, ok := w.path[path]
+ if ok {
+ existing = w.wd[wd]
+ }
+
+ upd, err := f(existing)
+ if err != nil {
+ return err
+ }
+ if upd != nil {
+ w.wd[upd.wd] = upd
+ w.path[upd.path] = upd.wd
+
+ if upd.wd != wd {
+ delete(w.wd, wd)
+ }
+ }
+
+ return nil
}
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
- // Create inotify fd
- // Need to set the FD to nonblocking mode in order for SetDeadline methods to work
- // Otherwise, blocking i/o operations won't terminate on close
+ return NewBufferedWatcher(0)
+}
+
+// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
+// channel.
+//
+// The main use case for this is situations with a very large number of events
+// where the kernel buffer size can't be increased (e.g. due to lack of
+// permissions). An unbuffered Watcher will perform better for almost all use
+// cases, and whenever possible you will be better off increasing the kernel
+// buffers instead of adding a large userspace buffer.
+func NewBufferedWatcher(sz uint) (*Watcher, error) {
+ // Need to set nonblocking mode for SetDeadline to work, otherwise blocking
+ // I/O operations won't terminate on close.
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
if fd == -1 {
return nil, errno
@@ -139,9 +260,8 @@ func NewWatcher() (*Watcher, error) {
w := &Watcher{
fd: fd,
inotifyFile: os.NewFile(uintptr(fd), ""),
- watches: make(map[string]*watch),
- paths: make(map[int]string),
- Events: make(chan Event),
+ watches: newWatches(),
+ Events: make(chan Event, sz),
Errors: make(chan error),
done: make(chan struct{}),
doneResp: make(chan struct{}),
@@ -157,8 +277,8 @@ func (w *Watcher) sendEvent(e Event) bool {
case w.Events <- e:
return true
case <-w.done:
+ return false
}
- return false
}
// Returns true if the error was sent, or false if watcher is closed.
@@ -180,17 +300,15 @@ func (w *Watcher) isClosed() bool {
}
}
-// Close removes all watches and closes the events channel.
+// Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error {
- w.mu.Lock()
+ w.closeMu.Lock()
if w.isClosed() {
- w.mu.Unlock()
+ w.closeMu.Unlock()
return nil
}
-
- // Send 'close' signal to goroutine, and set the Watcher to closed.
close(w.done)
- w.mu.Unlock()
+ w.closeMu.Unlock()
// Causes any blocking reads to return with an error, provided the file
// still supports deadline operations.
@@ -207,17 +325,21 @@ func (w *Watcher) Close() error {
// Add starts monitoring the path for changes.
//
-// A path can only be watched once; attempting to watch it more than once will
-// return an error. Paths that do not yet exist on the filesystem cannot be
-// added. A watch will be automatically removed if the path is deleted.
+// A path can only be watched once; watching it more than once is a no-op and will
+// not return an error. Paths that do not yet exist on the filesystem cannot be
+// watched.
//
-// A path will remain watched if it gets renamed to somewhere else on the same
-// filesystem, but the monitor will get removed if the path gets deleted and
-// re-created, or if it's moved to a different filesystem.
+// A watch will be automatically removed if the watched path is deleted or
+// renamed. The exception is the Windows backend, which doesn't remove the
+// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
+// Returns [ErrClosed] if [Watcher.Close] was called.
+//
+// See [Watcher.AddWith] for a version that allows adding options.
+//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
@@ -227,44 +349,59 @@ func (w *Watcher) Close() error {
// # Watching files
//
// Watching individual files (rather than directories) is generally not
-// recommended as many tools update files atomically. Instead of "just" writing
-// to the file a temporary file will be written to first, and if successful the
-// temporary file is moved to to destination removing the original, or some
-// variant thereof. The watcher on the original file is now lost, as it no
-// longer exists.
-//
-// Instead, watch the parent directory and use Event.Name to filter out files
-// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
-func (w *Watcher) Add(name string) error {
- name = filepath.Clean(name)
+// recommended as many programs (especially editors) update files atomically: it
+// will write to a temporary file which is then moved to the destination,
+// overwriting the original (or some variant thereof). The watcher on the
+// original file is now lost, as that no longer exists.
+//
+// The upshot of this is that a power failure or crash won't leave a
+// half-written file.
+//
+// Watch the parent directory and use Event.Name to filter out files you're not
+// interested in. There is an example of this in cmd/fsnotify/file.go.
+func (w *Watcher) Add(name string) error { return w.AddWith(name) }
+
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+// other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(name string, opts ...addOpt) error {
if w.isClosed() {
- return errors.New("inotify instance already closed")
+ return ErrClosed
}
+ name = filepath.Clean(name)
+ _ = getOptions(opts...)
+
var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
- w.mu.Lock()
- defer w.mu.Unlock()
- watchEntry := w.watches[name]
- if watchEntry != nil {
- flags |= watchEntry.flags | unix.IN_MASK_ADD
- }
- wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
- if wd == -1 {
- return errno
- }
+ return w.watches.updatePath(name, func(existing *watch) (*watch, error) {
+ if existing != nil {
+ flags |= existing.flags | unix.IN_MASK_ADD
+ }
- if watchEntry == nil {
- w.watches[name] = &watch{wd: uint32(wd), flags: flags}
- w.paths[wd] = name
- } else {
- watchEntry.wd = uint32(wd)
- watchEntry.flags = flags
- }
+ wd, err := unix.InotifyAddWatch(w.fd, name, flags)
+ if wd == -1 {
+ return nil, err
+ }
- return nil
+ if existing == nil {
+ return &watch{
+ wd: uint32(wd),
+ path: name,
+ flags: flags,
+ }, nil
+ }
+
+ existing.wd = uint32(wd)
+ existing.flags = flags
+ return existing, nil
+ })
}
// Remove stops monitoring the path for changes.
@@ -273,32 +410,22 @@ func (w *Watcher) Add(name string) error {
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+//
+// Returns nil if [Watcher.Close] was called.
func (w *Watcher) Remove(name string) error {
- name = filepath.Clean(name)
-
- // Fetch the watch.
- w.mu.Lock()
- defer w.mu.Unlock()
- watch, ok := w.watches[name]
+ if w.isClosed() {
+ return nil
+ }
+ return w.remove(filepath.Clean(name))
+}
- // Remove it from inotify.
+func (w *Watcher) remove(name string) error {
+ wd, ok := w.watches.removePath(name)
if !ok {
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
}
- // We successfully removed the watch if InotifyRmWatch doesn't return an
- // error, we need to clean up our internal state to ensure it matches
- // inotify's kernel state.
- delete(w.paths, int(watch.wd))
- delete(w.watches, name)
-
- // inotify_rm_watch will return EINVAL if the file has been deleted;
- // the inotify will already have been removed.
- // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously
- // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
- // so that EINVAL means that the wd is being rm_watch()ed or its file removed
- // by another thread and we have not received IN_IGNORE event.
- success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
+ success, errno := unix.InotifyRmWatch(w.fd, wd)
if success == -1 {
// TODO: Perhaps it's not helpful to return an error here in every case;
// The only two possible errors are:
@@ -312,28 +439,28 @@ func (w *Watcher) Remove(name string) error {
// are watching is deleted.
return errno
}
-
return nil
}
-// WatchList returns all paths added with [Add] (and are not yet removed).
+// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
+// yet removed).
+//
+// Returns nil if [Watcher.Close] was called.
func (w *Watcher) WatchList() []string {
- w.mu.Lock()
- defer w.mu.Unlock()
+ if w.isClosed() {
+ return nil
+ }
- entries := make([]string, 0, len(w.watches))
- for pathname := range w.watches {
+ entries := make([]string, 0, w.watches.len())
+ w.watches.mu.RLock()
+ for pathname := range w.watches.path {
entries = append(entries, pathname)
}
+ w.watches.mu.RUnlock()
return entries
}
-type watch struct {
- wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
- flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
-}
-
// readEvents reads from the inotify file descriptor, converts the
// received events into Event objects and sends them via the Events channel
func (w *Watcher) readEvents() {
@@ -367,14 +494,11 @@ func (w *Watcher) readEvents() {
if n < unix.SizeofInotifyEvent {
var err error
if n == 0 {
- // If EOF is received. This should really never happen.
- err = io.EOF
+ err = io.EOF // If EOF is received. This should really never happen.
} else if n < 0 {
- // If an error occurred while reading.
- err = errno
+ err = errno // If an error occurred while reading.
} else {
- // Read was too short.
- err = errors.New("notify: short read in readEvents()")
+ err = errors.New("notify: short read in readEvents()") // Read was too short.
}
if !w.sendError(err) {
return
@@ -403,18 +527,29 @@ func (w *Watcher) readEvents() {
// doesn't append the filename to the event, but we would like to always fill the
// the "Name" field with a valid filename. We retrieve the path of the watch from
// the "paths" map.
- w.mu.Lock()
- name, ok := w.paths[int(raw.Wd)]
- // IN_DELETE_SELF occurs when the file/directory being watched is removed.
- // This is a sign to clean up the maps, otherwise we are no longer in sync
- // with the inotify kernel state which has already deleted the watch
- // automatically.
- if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
- delete(w.paths, int(raw.Wd))
- delete(w.watches, name)
+ watch := w.watches.byWd(uint32(raw.Wd))
+
+ // inotify will automatically remove the watch on deletes; just need
+ // to clean our state here.
+ if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+ w.watches.remove(watch.wd)
+ }
+ // We can't really update the state when a watched path is moved;
+ // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove
+ // the watch.
+ if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
+ err := w.remove(watch.path)
+ if err != nil && !errors.Is(err, ErrNonExistentWatch) {
+ if !w.sendError(err) {
+ return
+ }
+ }
}
- w.mu.Unlock()
+ var name string
+ if watch != nil {
+ name = watch.path
+ }
if nameLen > 0 {
// Point "bytes" at the first byte of the filename
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
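
The inotify backend now routes NewWatcher through the new NewBufferedWatcher constructor and adds AddWith for per-call options. A hedged sketch of how these fit together is below; the buffer sizes and the watched path are illustrative assumptions, and WithBufferSize is documented above as a no-op everywhere except the Windows backend.

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// A non-zero size buffers the Events channel; NewBufferedWatcher(0)
	// behaves like NewWatcher().
	w, err := fsnotify.NewBufferedWatcher(4096)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// WithBufferSize only affects the Windows (ReadDirectoryChangesW)
	// backend and is ignored on inotify, kqueue, and FEN.
	if err := w.AddWith("/var/log", fsnotify.WithBufferSize(256<<10)); err != nil {
		log.Fatal(err)
	}

	// Errors should be drained as well in real code; see the loop sketched
	// after the FEN backend above.
	for ev := range w.Events {
		log.Println(ev)
	}
}
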
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
index 29087469..063a0915 100644
--- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
+++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
@@ -1,12 +1,14 @@
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
// +build freebsd openbsd netbsd dragonfly darwin
+// Note: the documentation on the Watcher type and methods is generated from
+// mkdoc.zsh
+
package fsnotify
import (
"errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"sync"
@@ -24,9 +26,9 @@ import (
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
-// fp := os.Open("file")
-// os.Remove("file") // Triggers Chmod
-// fp.Close() // Triggers Remove
+// fp := os.Open("file")
+// os.Remove("file") // Triggers Chmod
+// fp.Close() // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
@@ -40,16 +42,16 @@ import (
//
// To increase them you can use sysctl or write the value to the /proc file:
//
-// # Default values on Linux 5.18
-// sysctl fs.inotify.max_user_watches=124983
-// sysctl fs.inotify.max_user_instances=128
+// # Default values on Linux 5.18
+// sysctl fs.inotify.max_user_watches=124983
+// sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
-// fs.inotify.max_user_watches=124983
-// fs.inotify.max_user_instances=128
+// fs.inotify.max_user_watches=124983
+// fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
@@ -65,14 +67,20 @@ import (
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
-// # macOS notes
+// # Windows notes
+//
+// Paths can be added as "C:\path\to\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
//
-// Spotlight indexing on macOS can result in multiple events (see [#15]). A
-// temporary workaround is to add your folder(s) to the "Spotlight Privacy
-// Settings" until we have a native FSEvents implementation (see [#11]).
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all files, sometimes it will send no
+// events, and often only for some files.
//
-// [#11]: https://github.com/fsnotify/fsnotify/issues/11
-// [#15]: https://github.com/fsnotify/fsnotify/issues/15
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems. If you have many
+// events in quick succession this may not be enough, and you will have to use
+// [WithBufferSize] to increase the value.
type Watcher struct {
// Events sends the filesystem change events.
//
@@ -99,18 +107,27 @@ type Watcher struct {
// initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program
- // you may get hundreds of Write events, so you
- // probably want to wait until you've stopped receiving
- // them (see the dedup example in cmd/fsnotify).
+ // you may get hundreds of Write events, and you may
+ // want to wait until you've stopped receiving them
+ // (see the dedup example in cmd/fsnotify).
+ //
+ // Some systems may send Write events for directories
+ // when the directory content changes.
//
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent
- // and on kqueue when a file is truncated. On Windows
- // it's never sent.
+ // when a file is truncated. On Windows it's never
+ // sent.
Events chan Event
// Errors sends any errors.
+ //
+ // ErrEventOverflow is used to indicate there are too many events:
+ //
+ // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
+ // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
+ // - kqueue, fen: Not used.
Errors chan error
done chan struct{}
@@ -133,6 +150,18 @@ type pathInfo struct {
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
+ return NewBufferedWatcher(0)
+}
+
+// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
+// channel.
+//
+// The main use case for this is situations with a very large number of events
+// where the kernel buffer size can't be increased (e.g. due to lack of
+// permissions). An unbuffered Watcher will perform better for almost all use
+// cases, and whenever possible you will be better off increasing the kernel
+// buffers instead of adding a large userspace buffer.
+func NewBufferedWatcher(sz uint) (*Watcher, error) {
kq, closepipe, err := newKqueue()
if err != nil {
return nil, err
@@ -147,7 +176,7 @@ func NewWatcher() (*Watcher, error) {
paths: make(map[int]pathInfo),
fileExists: make(map[string]struct{}),
userWatches: make(map[string]struct{}),
- Events: make(chan Event),
+ Events: make(chan Event, sz),
Errors: make(chan error),
done: make(chan struct{}),
}
@@ -197,8 +226,8 @@ func (w *Watcher) sendEvent(e Event) bool {
case w.Events <- e:
return true
case <-w.done:
+ return false
}
- return false
}
// Returns true if the error was sent, or false if watcher is closed.
@@ -207,11 +236,11 @@ func (w *Watcher) sendError(err error) bool {
case w.Errors <- err:
return true
case <-w.done:
+ return false
}
- return false
}
-// Close removes all watches and closes the events channel.
+// Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error {
w.mu.Lock()
if w.isClosed {
@@ -239,17 +268,21 @@ func (w *Watcher) Close() error {
// Add starts monitoring the path for changes.
//
-// A path can only be watched once; attempting to watch it more than once will
-// return an error. Paths that do not yet exist on the filesystem cannot be
-// added. A watch will be automatically removed if the path is deleted.
+// A path can only be watched once; watching it more than once is a no-op and will
+// not return an error. Paths that do not yet exist on the filesystem cannot be
+// watched.
//
-// A path will remain watched if it gets renamed to somewhere else on the same
-// filesystem, but the monitor will get removed if the path gets deleted and
-// re-created, or if it's moved to a different filesystem.
+// A watch will be automatically removed if the watched path is deleted or
+// renamed. The exception is the Windows backend, which doesn't remove the
+// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
+// Returns [ErrClosed] if [Watcher.Close] was called.
+//
+// See [Watcher.AddWith] for a version that allows adding options.
+//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
@@ -259,15 +292,28 @@ func (w *Watcher) Close() error {
// # Watching files
//
// Watching individual files (rather than directories) is generally not
-// recommended as many tools update files atomically. Instead of "just" writing
-// to the file a temporary file will be written to first, and if successful the
-// temporary file is moved to to destination removing the original, or some
-// variant thereof. The watcher on the original file is now lost, as it no
-// longer exists.
-//
-// Instead, watch the parent directory and use Event.Name to filter out files
-// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
-func (w *Watcher) Add(name string) error {
+// recommended as many programs (especially editors) update files atomically: it
+// will write to a temporary file which is then moved to the destination,
+// overwriting the original (or some variant thereof). The watcher on the
+// original file is now lost, as that no longer exists.
+//
+// The upshot of this is that a power failure or crash won't leave a
+// half-written file.
+//
+// Watch the parent directory and use Event.Name to filter out files you're not
+// interested in. There is an example of this in cmd/fsnotify/file.go.
+func (w *Watcher) Add(name string) error { return w.AddWith(name) }
+
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+// other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(name string, opts ...addOpt) error {
+ _ = getOptions(opts...)
+
w.mu.Lock()
w.userWatches[name] = struct{}{}
w.mu.Unlock()
@@ -281,9 +327,19 @@ func (w *Watcher) Add(name string) error {
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+//
+// Returns nil if [Watcher.Close] was called.
func (w *Watcher) Remove(name string) error {
+ return w.remove(name, true)
+}
+
+func (w *Watcher) remove(name string, unwatchFiles bool) error {
name = filepath.Clean(name)
w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return nil
+ }
watchfd, ok := w.watches[name]
w.mu.Unlock()
if !ok {
@@ -315,7 +371,7 @@ func (w *Watcher) Remove(name string) error {
w.mu.Unlock()
// Find all watched paths that are in this directory that are not external.
- if isDir {
+ if unwatchFiles && isDir {
var pathsToRemove []string
w.mu.Lock()
for fd := range w.watchesByDir[name] {
@@ -326,20 +382,25 @@ func (w *Watcher) Remove(name string) error {
}
w.mu.Unlock()
for _, name := range pathsToRemove {
- // Since these are internal, not much sense in propagating error
- // to the user, as that will just confuse them with an error about
- // a path they did not explicitly watch themselves.
+ // Since these are internal, not much sense in propagating error to
+ // the user, as that will just confuse them with an error about a
+ // path they did not explicitly watch themselves.
w.Remove(name)
}
}
-
return nil
}
-// WatchList returns all paths added with [Add] (and are not yet removed).
+// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
+// yet removed).
+//
+// Returns nil if [Watcher.Close] was called.
func (w *Watcher) WatchList() []string {
w.mu.Lock()
defer w.mu.Unlock()
+ if w.isClosed {
+ return nil
+ }
entries := make([]string, 0, len(w.userWatches))
for pathname := range w.userWatches {
@@ -352,18 +413,18 @@ func (w *Watcher) WatchList() []string {
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
-// addWatch adds name to the watched file set.
-// The flags are interpreted as described in kevent(2).
-// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
+// addWatch adds name to the watched file set; the flags are interpreted as
+// described in kevent(2).
+//
+// Returns the real path to the file which was added, with symlinks resolved.
func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
var isDir bool
- // Make ./name and name equivalent
name = filepath.Clean(name)
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
- return "", errors.New("kevent instance already closed")
+ return "", ErrClosed
}
watchfd, alreadyWatching := w.watches[name]
// We already have a watch, but we can still override flags.
@@ -383,27 +444,30 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
return "", nil
}
- // Follow Symlinks
- //
- // Linux can add unresolvable symlinks to the watch list without issue,
- // and Windows can't do symlinks period. To maintain consistency, we
- // will act like everything is fine if the link can't be resolved.
- // There will simply be no file events for broken symlinks. Hence the
- // returns of nil on errors.
+ // Follow Symlinks.
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
- name, err = filepath.EvalSymlinks(name)
+ link, err := os.Readlink(name)
if err != nil {
+ // Return nil because Linux can add unresolvable symlinks to the
+ // watch list without problems, so maintain consistency with
+ // that. There will be no file events for broken symlinks.
+ // TODO: more specific check; returns os.PathError; ENOENT?
return "", nil
}
w.mu.Lock()
- _, alreadyWatching = w.watches[name]
+ _, alreadyWatching = w.watches[link]
w.mu.Unlock()
if alreadyWatching {
- return name, nil
+ // Add to watches so we don't get spurious Create events later
+ // on when we diff the directories.
+ w.watches[name] = 0
+ w.fileExists[name] = struct{}{}
+ return link, nil
}
+ name = link
fi, err = os.Lstat(name)
if err != nil {
return "", nil
@@ -411,7 +475,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
}
// Retry on EINTR; open() can return EINTR in practice on macOS.
- // See #354, and go issues 11180 and 39237.
+ // See #354, and Go issues 11180 and 39237.
for {
watchfd, err = unix.Open(name, openMode, 0)
if err == nil {
@@ -444,14 +508,13 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
w.watchesByDir[parentName] = watchesByDir
}
watchesByDir[watchfd] = struct{}{}
-
w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
w.mu.Unlock()
}
if isDir {
- // Watch the directory if it has not been watched before,
- // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
+ // Watch the directory if it has not been watched before, or if it was
+ // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
w.mu.Lock()
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
@@ -473,13 +536,10 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
// Event values that it sends down the Events channel.
func (w *Watcher) readEvents() {
defer func() {
- err := unix.Close(w.kq)
- if err != nil {
- w.Errors <- err
- }
- unix.Close(w.closepipe[0])
close(w.Events)
close(w.Errors)
+ _ = unix.Close(w.kq)
+ unix.Close(w.closepipe[0])
}()
eventBuffer := make([]unix.Kevent_t, 10)
@@ -513,18 +573,8 @@ func (w *Watcher) readEvents() {
event := w.newEvent(path.name, mask)
- if path.isDir && !event.Has(Remove) {
- // Double check to make sure the directory exists. This can
- // happen when we do a rm -fr on a recursively watched folders
- // and we receive a modification event first but the folder has
- // been deleted and later receive the delete event.
- if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
- event.Op |= Remove
- }
- }
-
if event.Has(Rename) || event.Has(Remove) {
- w.Remove(event.Name)
+ w.remove(event.Name, false)
w.mu.Lock()
delete(w.fileExists, event.Name)
w.mu.Unlock()
@@ -540,26 +590,30 @@ func (w *Watcher) readEvents() {
}
if event.Has(Remove) {
- // Look for a file that may have overwritten this.
- // For example, mv f1 f2 will delete f2, then create f2.
+ // Look for a file that may have overwritten this; for example,
+ // mv f1 f2 will delete f2, then create f2.
if path.isDir {
fileDir := filepath.Clean(event.Name)
w.mu.Lock()
_, found := w.watches[fileDir]
w.mu.Unlock()
if found {
- // make sure the directory exists before we watch for changes. When we
- // do a recursive watch and perform rm -fr, the parent directory might
- // have gone missing, ignore the missing directory and let the
- // upcoming delete event remove the watch from the parent directory.
- if _, err := os.Lstat(fileDir); err == nil {
- w.sendDirectoryChangeEvents(fileDir)
+ err := w.sendDirectoryChangeEvents(fileDir)
+ if err != nil {
+ if !w.sendError(err) {
+ closed = true
+ }
}
}
} else {
filePath := filepath.Clean(event.Name)
- if fileInfo, err := os.Lstat(filePath); err == nil {
- w.sendFileCreatedEventIfNew(filePath, fileInfo)
+ if fi, err := os.Lstat(filePath); err == nil {
+ err := w.sendFileCreatedEventIfNew(filePath, fi)
+ if err != nil {
+ if !w.sendError(err) {
+ closed = true
+ }
+ }
}
}
}
@@ -582,21 +636,31 @@ func (w *Watcher) newEvent(name string, mask uint32) Event {
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
e.Op |= Chmod
}
+ // No point sending a write and delete event at the same time: if it's gone,
+ // then it's gone.
+ if e.Op.Has(Write) && e.Op.Has(Remove) {
+ e.Op &^= Write
+ }
return e
}
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
// Get all files
- files, err := ioutil.ReadDir(dirPath)
+ files, err := os.ReadDir(dirPath)
if err != nil {
return err
}
- for _, fileInfo := range files {
- path := filepath.Join(dirPath, fileInfo.Name())
+ for _, f := range files {
+ path := filepath.Join(dirPath, f.Name())
+
+ fi, err := f.Info()
+ if err != nil {
+ return fmt.Errorf("%q: %w", path, err)
+ }
- cleanPath, err := w.internalWatch(path, fileInfo)
+ cleanPath, err := w.internalWatch(path, fi)
if err != nil {
// No permission to read the file; that's not a problem: just skip.
// But do add it to w.fileExists to prevent it from being picked up
@@ -606,7 +670,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error {
case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM):
cleanPath = filepath.Clean(path)
default:
- return fmt.Errorf("%q: %w", filepath.Join(dirPath, fileInfo.Name()), err)
+ return fmt.Errorf("%q: %w", path, err)
}
}
@@ -622,26 +686,37 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error {
//
// This functionality is to have the BSD watcher match the inotify, which sends
// a create event for files created in a watched directory.
-func (w *Watcher) sendDirectoryChangeEvents(dir string) {
- // Get all files
- files, err := ioutil.ReadDir(dir)
+func (w *Watcher) sendDirectoryChangeEvents(dir string) error {
+ files, err := os.ReadDir(dir)
if err != nil {
- if !w.sendError(fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)) {
- return
+ // Directory no longer exists: we can ignore this safely. kqueue will
+ // still give us the correct events.
+ if errors.Is(err, os.ErrNotExist) {
+ return nil
}
+ return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
}
- // Search for new files
- for _, fi := range files {
- err := w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi)
+ for _, f := range files {
+ fi, err := f.Info()
if err != nil {
- return
+ return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
+ }
+
+ err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi)
+ if err != nil {
+ // Don't need to send an error if this file isn't readable.
+ if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) {
+ return nil
+ }
+ return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
}
}
+ return nil
}
// sendFileCreatedEvent sends a create event if the file isn't already being tracked.
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) {
w.mu.Lock()
_, doesExist := w.fileExists[filePath]
w.mu.Unlock()
@@ -652,7 +727,7 @@ func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInf
}
// like watchDirectoryFiles (but without doing another ReadDir)
- filePath, err = w.internalWatch(filePath, fileInfo)
+ filePath, err = w.internalWatch(filePath, fi)
if err != nil {
return err
}
@@ -664,10 +739,10 @@ func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInf
return nil
}
-func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
- if fileInfo.IsDir() {
- // mimic Linux providing delete events for subdirectories
- // but preserve the flags used if currently watching subdirectory
+func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) {
+ if fi.IsDir() {
+ // mimic Linux providing delete events for subdirectories, but preserve
+ // the flags used if currently watching subdirectory
w.mu.Lock()
flags := w.dirFlags[name]
w.mu.Unlock()
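
The updated Add documentation in this backend (as in the others) warns that watching a single file breaks under atomic saves and recommends watching the parent directory and filtering on Event.Name. A rough sketch of that pattern follows, using a hypothetical watchFile helper; cmd/fsnotify/file.go is the upstream example this approximates.

package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

// watchFile watches the directory containing path and filters on Event.Name,
// so the file keeps being reported even after an editor's atomic save
// (write to a temporary file, then rename it over the original).
func watchFile(path string) error {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	defer w.Close()

	if err := w.Add(filepath.Dir(path)); err != nil {
		return err
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return nil
			}
			if ev.Name == path && (ev.Has(fsnotify.Write) || ev.Has(fsnotify.Create)) {
				log.Println("changed:", path)
			}
		case err, ok := <-w.Errors:
			if !ok {
				return nil
			}
			log.Println("error:", err)
		}
	}
}

func main() {
	if err := watchFile("/etc/hosts"); err != nil {
		log.Fatal(err)
	}
}
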
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go
index a9bb1c3c..d34a23c0 100644
--- a/vendor/github.com/fsnotify/fsnotify/backend_other.go
+++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go
@@ -1,39 +1,169 @@
-//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows
-// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
+//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows)
+// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
+
+// Note: the documentation on the Watcher type and methods is generated from
+// mkdoc.zsh
package fsnotify
-import (
- "fmt"
- "runtime"
-)
+import "errors"
-// Watcher watches a set of files, delivering events to a channel.
-type Watcher struct{}
+// Watcher watches a set of paths, delivering events on a channel.
+//
+// A watcher should not be copied (e.g. pass it by pointer, rather than by
+// value).
+//
+// # Linux notes
+//
+// When a file is removed a Remove event won't be emitted until all file
+// descriptors are closed, and deletes will always emit a Chmod. For example:
+//
+// fp := os.Open("file")
+// os.Remove("file") // Triggers Chmod
+// fp.Close() // Triggers Remove
+//
+// This is the event that inotify sends, so not much can be changed about this.
+//
+// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
+// for the number of watches per user, and fs.inotify.max_user_instances
+// specifies the maximum number of inotify instances per user. Every Watcher you
+// create is an "instance", and every path you add is a "watch".
+//
+// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
+// /proc/sys/fs/inotify/max_user_instances
+//
+// To increase them you can use sysctl or write the value to the /proc file:
+//
+// # Default values on Linux 5.18
+// sysctl fs.inotify.max_user_watches=124983
+// sysctl fs.inotify.max_user_instances=128
+//
+// To make the changes persist on reboot edit /etc/sysctl.conf or
+// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
+// your distro's documentation):
+//
+// fs.inotify.max_user_watches=124983
+// fs.inotify.max_user_instances=128
+//
+// Reaching the limit will result in a "no space left on device" or "too many open
+// files" error.
+//
+// # kqueue notes (macOS, BSD)
+//
+// kqueue requires opening a file descriptor for every file that's being watched;
+// so if you're watching a directory with five files then that's six file
+// descriptors. You will run into your system's "max open files" limit faster on
+// these platforms.
+//
+// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
+// control the maximum number of open files, as well as /etc/login.conf on BSD
+// systems.
+//
+// # Windows notes
+//
+// Paths can be added as "C:\path\to\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
+//
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all files, sometimes it will send no
+// events, and often only for some files.
+//
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems. If you have many
+// events in quick succession this may not be enough, and you will have to use
+// [WithBufferSize] to increase the value.
+type Watcher struct {
+ // Events sends the filesystem change events.
+ //
+ // fsnotify can send the following events; a "path" here can refer to a
+ // file, directory, symbolic link, or special file like a FIFO.
+ //
+ // fsnotify.Create A new path was created; this may be followed by one
+ // or more Write events if data also gets written to a
+ // file.
+ //
+ // fsnotify.Remove A path was removed.
+ //
+ // fsnotify.Rename A path was renamed. A rename is always sent with the
+ // old path as Event.Name, and a Create event will be
+ // sent with the new name. Renames are only sent for
+ // paths that are currently watched; e.g. moving an
+ // unmonitored file into a monitored directory will
+ // show up as just a Create. Similarly, renaming a file
+ // to outside a monitored directory will show up as
+ // only a Rename.
+ //
+ // fsnotify.Write A file or named pipe was written to. A Truncate will
+ // also trigger a Write. A single "write action"
+ // initiated by the user may show up as one or multiple
+ // writes, depending on when the system syncs things to
+ // disk. For example when compiling a large Go program
+ // you may get hundreds of Write events, and you may
+ // want to wait until you've stopped receiving them
+ // (see the dedup example in cmd/fsnotify).
+ //
+ // Some systems may send Write events for directories
+ // when the directory content changes.
+ //
+ // fsnotify.Chmod Attributes were changed. On Linux this is also sent
+ // when a file is removed (or more accurately, when a
+ // link to an inode is removed). On kqueue it's sent
+ // when a file is truncated. On Windows it's never
+ // sent.
+ Events chan Event
+
+ // Errors sends any errors.
+ //
+ // ErrEventOverflow is used to indicate there are too many events:
+ //
+ // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
+ // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
+ // - kqueue, fen: Not used.
+ Errors chan error
+}
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
- return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS)
+ return nil, errors.New("fsnotify not supported on the current platform")
}
-// Close removes all watches and closes the events channel.
-func (w *Watcher) Close() error {
- return nil
-}
+// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
+// channel.
+//
+// The main use case for this is situations with a very large number of events
+// where the kernel buffer size can't be increased (e.g. due to lack of
+// permissions). An unbuffered Watcher will perform better for almost all use
+// cases, and whenever possible you will be better off increasing the kernel
+// buffers instead of adding a large userspace buffer.
+func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() }
+
+// Close removes all watches and closes the Events channel.
+func (w *Watcher) Close() error { return nil }
+
+// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
+// yet removed).
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) WatchList() []string { return nil }
// Add starts monitoring the path for changes.
//
-// A path can only be watched once; attempting to watch it more than once will
-// return an error. Paths that do not yet exist on the filesystem cannot be
-// added. A watch will be automatically removed if the path is deleted.
+// A path can only be watched once; watching it more than once is a no-op and will
+// not return an error. Paths that do not yet exist on the filesystem cannot be
+// watched.
//
-// A path will remain watched if it gets renamed to somewhere else on the same
-// filesystem, but the monitor will get removed if the path gets deleted and
-// re-created, or if it's moved to a different filesystem.
+// A watch will be automatically removed if the watched path is deleted or
+// renamed. The exception is the Windows backend, which doesn't remove the
+// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
+// Returns [ErrClosed] if [Watcher.Close] was called.
+//
+// See [Watcher.AddWith] for a version that allows adding options.
+//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
@@ -43,17 +173,26 @@ func (w *Watcher) Close() error {
// # Watching files
//
// Watching individual files (rather than directories) is generally not
-// recommended as many tools update files atomically. Instead of "just" writing
-// to the file a temporary file will be written to first, and if successful the
-// temporary file is moved to to destination removing the original, or some
-// variant thereof. The watcher on the original file is now lost, as it no
-// longer exists.
-//
-// Instead, watch the parent directory and use Event.Name to filter out files
-// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
-func (w *Watcher) Add(name string) error {
- return nil
-}
+// recommended as many programs (especially editors) update files atomically: it
+// will write to a temporary file which is then moved to the destination,
+// overwriting the original (or some variant thereof). The watcher on the
+// original file is now lost, as that no longer exists.
+//
+// The upshot of this is that a power failure or crash won't leave a
+// half-written file.
+//
+// Watch the parent directory and use Event.Name to filter out files you're not
+// interested in. There is an example of this in cmd/fsnotify/file.go.
+func (w *Watcher) Add(name string) error { return nil }
+
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+// other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil }
// Remove stops monitoring the path for changes.
//
@@ -61,6 +200,6 @@ func (w *Watcher) Add(name string) error {
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
-func (w *Watcher) Remove(name string) error {
- return nil
-}
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) Remove(name string) error { return nil }
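
backend_other.go is a stub: every method is a no-op and only NewWatcher reports that the platform (or an appengine build) is unsupported. A caller that might end up on this backend can detect that single failure point and degrade gracefully; a hedged sketch follows, where the polling fallback is an assumption rather than anything fsnotify provides.

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		// The stub backend fails here at construction time, so this is the
		// one place to notice an unsupported platform and fall back to
		// something else (e.g. periodic polling) instead of crashing.
		log.Printf("file watching unavailable, continuing without it: %v", err)
		return
	}
	defer w.Close()

	// ... normal Add() calls and event loop as sketched earlier ...
}
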
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go
index ae392867..9bc91e5d 100644
--- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go
+++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go
@@ -1,6 +1,13 @@
//go:build windows
// +build windows
+// Windows backend based on ReadDirectoryChangesW()
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
+//
+// Note: the documentation on the Watcher type and methods is generated from
+// mkdoc.zsh
+
package fsnotify
import (
@@ -27,9 +34,9 @@ import (
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
-// fp := os.Open("file")
-// os.Remove("file") // Triggers Chmod
-// fp.Close() // Triggers Remove
+// fp := os.Open("file")
+// os.Remove("file") // Triggers Chmod
+// fp.Close() // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
@@ -43,16 +50,16 @@ import (
//
// To increase them you can use sysctl or write the value to the /proc file:
//
-// # Default values on Linux 5.18
-// sysctl fs.inotify.max_user_watches=124983
-// sysctl fs.inotify.max_user_instances=128
+// # Default values on Linux 5.18
+// sysctl fs.inotify.max_user_watches=124983
+// sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
-// fs.inotify.max_user_watches=124983
-// fs.inotify.max_user_instances=128
+// fs.inotify.max_user_watches=124983
+// fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
@@ -68,14 +75,20 @@ import (
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
-// # macOS notes
+// # Windows notes
//
-// Spotlight indexing on macOS can result in multiple events (see [#15]). A
-// temporary workaround is to add your folder(s) to the "Spotlight Privacy
-// Settings" until we have a native FSEvents implementation (see [#11]).
+// Paths can be added as "C:\path\to\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
//
-// [#11]: https://github.com/fsnotify/fsnotify/issues/11
-// [#15]: https://github.com/fsnotify/fsnotify/issues/15
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all files, sometimes it will send no
+// events, and often only for some files.
+//
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems. If you have many
+// events in quick succession this may not be enough, and you will have to use
+// [WithBufferSize] to increase the value.
type Watcher struct {
// Events sends the filesystem change events.
//
@@ -102,31 +115,52 @@ type Watcher struct {
// initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program
- // you may get hundreds of Write events, so you
- // probably want to wait until you've stopped receiving
- // them (see the dedup example in cmd/fsnotify).
+ // you may get hundreds of Write events, and you may
+ // want to wait until you've stopped receiving them
+ // (see the dedup example in cmd/fsnotify).
+ //
+ // Some systems may send Write events for directories
+ // when the directory content changes.
//
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent
- // and on kqueue when a file is truncated. On Windows
- // it's never sent.
+ // when a file is truncated. On Windows it's never
+ // sent.
Events chan Event
// Errors sends any errors.
+ //
+ // ErrEventOverflow is used to indicate there are too many events:
+ //
+ // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
+ // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
+ // - kqueue, fen: Not used.
Errors chan error
port windows.Handle // Handle to completion port
input chan *input // Inputs to the reader are sent on this channel
quit chan chan<- error
- mu sync.Mutex // Protects access to watches, isClosed
- watches watchMap // Map of watches (key: i-number)
- isClosed bool // Set to true when Close() is first called
+ mu sync.Mutex // Protects access to watches, closed
+ watches watchMap // Map of watches (key: i-number)
+ closed bool // Set to true when Close() is first called
}
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
+ return NewBufferedWatcher(50)
+}
+
+// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
+// channel.
+//
+// The main use case for this is situations with a very large number of events
+// where the kernel buffer size can't be increased (e.g. due to lack of
+// permissions). An unbuffered Watcher will perform better for almost all use
+// cases, and whenever possible you will be better off increasing the kernel
+// buffers instead of adding a large userspace buffer.
+func NewBufferedWatcher(sz uint) (*Watcher, error) {
port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
if err != nil {
return nil, os.NewSyscallError("CreateIoCompletionPort", err)
@@ -135,7 +169,7 @@ func NewWatcher() (*Watcher, error) {
port: port,
watches: make(watchMap),
input: make(chan *input, 1),
- Events: make(chan Event, 50),
+ Events: make(chan Event, sz),
Errors: make(chan error),
quit: make(chan chan<- error, 1),
}
@@ -143,6 +177,12 @@ func NewWatcher() (*Watcher, error) {
return w, nil
}
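// A minimal usage sketch for the constructors above (illustrative only, not
// part of this diff; the buffer size and path are assumptions):
//
//	w, err := fsnotify.NewBufferedWatcher(500) // or fsnotify.NewWatcher()
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer w.Close()
//	if err := w.Add(`C:\some\dir`); err != nil {
//		log.Fatal(err)
//	}
//	for {
//		select {
//		case ev, ok := <-w.Events:
//			if !ok {
//				return
//			}
//			log.Println("event:", ev)
//		case err, ok := <-w.Errors:
//			if !ok {
//				return
//			}
//			log.Println("error:", err)
//		}
//	}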
+func (w *Watcher) isClosed() bool {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ return w.closed
+}
+
func (w *Watcher) sendEvent(name string, mask uint64) bool {
if mask == 0 {
return false
@@ -167,14 +207,14 @@ func (w *Watcher) sendError(err error) bool {
return false
}
-// Close removes all watches and closes the events channel.
+// Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error {
- w.mu.Lock()
- if w.isClosed {
- w.mu.Unlock()
+ if w.isClosed() {
return nil
}
- w.isClosed = true
+
+ w.mu.Lock()
+ w.closed = true
w.mu.Unlock()
// Send "quit" message to the reader goroutine
@@ -188,17 +228,21 @@ func (w *Watcher) Close() error {
// Add starts monitoring the path for changes.
//
-// A path can only be watched once; attempting to watch it more than once will
-// return an error. Paths that do not yet exist on the filesystem cannot be
-// added. A watch will be automatically removed if the path is deleted.
+// A path can only be watched once; watching it more than once is a no-op and will
+// not return an error. Paths that do not yet exist on the filesystem cannot be
+// watched.
//
-// A path will remain watched if it gets renamed to somewhere else on the same
-// filesystem, but the monitor will get removed if the path gets deleted and
-// re-created, or if it's moved to a different filesystem.
+// A watch will be automatically removed if the watched path is deleted or
+// renamed. The exception is the Windows backend, which doesn't remove the
+// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
+// Returns [ErrClosed] if [Watcher.Close] was called.
+//
+// See [Watcher.AddWith] for a version that allows adding options.
+//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
@@ -208,27 +252,41 @@ func (w *Watcher) Close() error {
// # Watching files
//
// Watching individual files (rather than directories) is generally not
-// recommended as many tools update files atomically. Instead of "just" writing
-// to the file a temporary file will be written to first, and if successful the
-// temporary file is moved to to destination removing the original, or some
-// variant thereof. The watcher on the original file is now lost, as it no
-// longer exists.
-//
-// Instead, watch the parent directory and use Event.Name to filter out files
-// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
-func (w *Watcher) Add(name string) error {
- w.mu.Lock()
- if w.isClosed {
- w.mu.Unlock()
- return errors.New("watcher already closed")
+// recommended as many programs (especially editors) update files atomically:
+// they write to a temporary file which is then moved to the destination,
+// overwriting the original (or some variant thereof). The watcher on the
+// original file is now lost, as that no longer exists.
+//
+// The upshot of this is that a power failure or crash won't leave a
+// half-written file.
+//
+// Watch the parent directory and use Event.Name to filter out files you're not
+// interested in. There is an example of this in cmd/fsnotify/file.go.
+func (w *Watcher) Add(name string) error { return w.AddWith(name) }
+
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+// other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(name string, opts ...addOpt) error {
+ if w.isClosed() {
+ return ErrClosed
+ }
+
+ with := getOptions(opts...)
+ if with.bufsize < 4096 {
+ return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes")
}
- w.mu.Unlock()
in := &input{
- op: opAddWatch,
- path: filepath.Clean(name),
- flags: sysFSALLEVENTS,
- reply: make(chan error),
+ op: opAddWatch,
+ path: filepath.Clean(name),
+ flags: sysFSALLEVENTS,
+ reply: make(chan error),
+ bufsize: with.bufsize,
}
w.input <- in
if err := w.wakeupReader(); err != nil {
@@ -243,7 +301,13 @@ func (w *Watcher) Add(name string) error {
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+//
+// Returns nil if [Watcher.Close] was called.
func (w *Watcher) Remove(name string) error {
+ if w.isClosed() {
+ return nil
+ }
+
in := &input{
op: opRemoveWatch,
path: filepath.Clean(name),
@@ -256,8 +320,15 @@ func (w *Watcher) Remove(name string) error {
return <-in.reply
}
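// A sketch of the Remove semantics documented above (the path is an assumption
// for illustration):
//
//	err := w.Remove(`C:\some\dir`)
//	if errors.Is(err, fsnotify.ErrNonExistentWatch) {
//		// The path was never added, or its watch was already removed.
//	}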
-// WatchList returns all paths added with [Add] (and are not yet removed).
+// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
+// yet removed).
+//
+// Returns nil if [Watcher.Close] was called.
func (w *Watcher) WatchList() []string {
+ if w.isClosed() {
+ return nil
+ }
+
w.mu.Lock()
defer w.mu.Unlock()
@@ -279,7 +350,6 @@ func (w *Watcher) WatchList() []string {
// This should all be removed at some point, and just use windows.FILE_NOTIFY_*
const (
sysFSALLEVENTS = 0xfff
- sysFSATTRIB = 0x4
sysFSCREATE = 0x100
sysFSDELETE = 0x200
sysFSDELETESELF = 0x400
@@ -305,9 +375,6 @@ func (w *Watcher) newEvent(name string, mask uint32) Event {
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
e.Op |= Rename
}
- if mask&sysFSATTRIB == sysFSATTRIB {
- e.Op |= Chmod
- }
return e
}
@@ -321,10 +388,11 @@ const (
)
type input struct {
- op int
- path string
- flags uint32
- reply chan error
+ op int
+ path string
+ flags uint32
+ bufsize int
+ reply chan error
}
type inode struct {
@@ -334,13 +402,14 @@ type inode struct {
}
type watch struct {
- ov windows.Overlapped
- ino *inode // i-number
- path string // Directory path
- mask uint64 // Directory itself is being watched with these notify flags
- names map[string]uint64 // Map of names being watched and their notify flags
- rename string // Remembers the old name while renaming a file
- buf [65536]byte // 64K buffer
+ ov windows.Overlapped
+ ino *inode // i-number
+ recurse bool // Recursive watch?
+ path string // Directory path
+ mask uint64 // Directory itself is being watched with these notify flags
+ names map[string]uint64 // Map of names being watched and their notify flags
+ rename string // Remembers the old name while renaming a file
+ buf []byte // buffer, allocated later
}
type (
@@ -413,7 +482,10 @@ func (m watchMap) set(ino *inode, watch *watch) {
}
// Must run within the I/O thread.
-func (w *Watcher) addWatch(pathname string, flags uint64) error {
+func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error {
+ //pathname, recurse := recursivePath(pathname)
+ recurse := false
+
dir, err := w.getDir(pathname)
if err != nil {
return err
@@ -433,9 +505,11 @@ func (w *Watcher) addWatch(pathname string, flags uint64) error {
return os.NewSyscallError("CreateIoCompletionPort", err)
}
watchEntry = &watch{
- ino: ino,
- path: dir,
- names: make(map[string]uint64),
+ ino: ino,
+ path: dir,
+ names: make(map[string]uint64),
+ recurse: recurse,
+ buf: make([]byte, bufsize),
}
w.mu.Lock()
w.watches.set(ino, watchEntry)
@@ -465,6 +539,8 @@ func (w *Watcher) addWatch(pathname string, flags uint64) error {
// Must run within the I/O thread.
func (w *Watcher) remWatch(pathname string) error {
+ pathname, recurse := recursivePath(pathname)
+
dir, err := w.getDir(pathname)
if err != nil {
return err
@@ -478,6 +554,10 @@ func (w *Watcher) remWatch(pathname string) error {
watch := w.watches.get(ino)
w.mu.Unlock()
+ if recurse && !watch.recurse {
+ return fmt.Errorf("can't use \\... with non-recursive watch %q", pathname)
+ }
+
err = windows.CloseHandle(ino.handle)
if err != nil {
w.sendError(os.NewSyscallError("CloseHandle", err))
@@ -535,8 +615,11 @@ func (w *Watcher) startRead(watch *watch) error {
return nil
}
- rdErr := windows.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
- uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
+ // We need to pass the array, rather than the slice.
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&watch.buf))
+ rdErr := windows.ReadDirectoryChanges(watch.ino.handle,
+ (*byte)(unsafe.Pointer(hdr.Data)), uint32(hdr.Len),
+ watch.recurse, mask, nil, &watch.ov, 0)
if rdErr != nil {
err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
@@ -563,9 +646,8 @@ func (w *Watcher) readEvents() {
runtime.LockOSThread()
for {
+ // This error is handled after the watch == nil check below.
qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE)
- // This error is handled after the watch == nil check below. NOTE: this
- // seems odd, note sure if it's correct.
watch := (*watch)(unsafe.Pointer(ov))
if watch == nil {
@@ -595,7 +677,7 @@ func (w *Watcher) readEvents() {
case in := <-w.input:
switch in.op {
case opAddWatch:
- in.reply <- w.addWatch(in.path, uint64(in.flags))
+ in.reply <- w.addWatch(in.path, uint64(in.flags), in.bufsize)
case opRemoveWatch:
in.reply <- w.remWatch(in.path)
}
@@ -605,6 +687,8 @@ func (w *Watcher) readEvents() {
}
switch qErr {
+ case nil:
+ // No error
case windows.ERROR_MORE_DATA:
if watch == nil {
w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer"))
@@ -626,13 +710,12 @@ func (w *Watcher) readEvents() {
default:
w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr))
continue
- case nil:
}
var offset uint32
for {
if n == 0 {
- w.sendError(errors.New("short read in readEvents()"))
+ w.sendError(ErrEventOverflow)
break
}
@@ -703,8 +786,9 @@ func (w *Watcher) readEvents() {
// Error!
if offset >= n {
+ //lint:ignore ST1005 Windows should be capitalized
w.sendError(errors.New(
- "Windows system assumed buffer larger than it is, events have likely been missed."))
+ "Windows system assumed buffer larger than it is, events have likely been missed"))
break
}
}
@@ -720,9 +804,6 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 {
if mask&sysFSMODIFY != 0 {
m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE
}
- if mask&sysFSATTRIB != 0 {
- m |= windows.FILE_NOTIFY_CHANGE_ATTRIBUTES
- }
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME
}
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
index 30a5bf0f..24c99cc4 100644
--- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
@@ -1,13 +1,18 @@
-//go:build !plan9
-// +build !plan9
-
// Package fsnotify provides a cross-platform interface for file system
// notifications.
+//
+// Currently supported systems:
+//
+// Linux 2.6.32+ via inotify
+// BSD, macOS via kqueue
+// Windows via ReadDirectoryChangesW
+// illumos via FEN
package fsnotify
import (
"errors"
"fmt"
+ "path/filepath"
"strings"
)
@@ -33,34 +38,52 @@ type Op uint32
// The operations fsnotify can trigger; see the documentation on [Watcher] for a
// full description, and check them with [Event.Has].
const (
+ // A new pathname was created.
Create Op = 1 << iota
+
+ // The pathname was written to; this does *not* mean the write has finished,
+ // and a write can be followed by more writes.
Write
+
+ // The path was removed; any watches on it will be removed. Some "remove"
+ // operations may trigger a Rename if the file is actually moved (for
+ // example "remove to trash" is often a rename).
Remove
+
+ // The path was renamed to something else; any watches on it will be
+ // removed.
Rename
+
+ // File attributes were changed.
+ //
+ // It's generally not recommended to take action on this event, as it may
+ // get triggered very frequently by some software. For example, Spotlight
+ // indexing on macOS, anti-virus software, backup software, etc.
Chmod
)
-// Common errors that can be reported by a watcher
+// Common errors that can be reported.
var (
- ErrNonExistentWatch = errors.New("can't remove non-existent watcher")
- ErrEventOverflow = errors.New("fsnotify queue overflow")
+ ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch")
+ ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow")
+ ErrClosed = errors.New("fsnotify: watcher already closed")
)
-func (op Op) String() string {
+func (o Op) String() string {
var b strings.Builder
- if op.Has(Create) {
+ if o.Has(Create) {
b.WriteString("|CREATE")
}
- if op.Has(Remove) {
+ if o.Has(Remove) {
b.WriteString("|REMOVE")
}
- if op.Has(Write) {
+ if o.Has(Write) {
b.WriteString("|WRITE")
}
- if op.Has(Rename) {
+ if o.Has(Rename) {
b.WriteString("|RENAME")
}
- if op.Has(Chmod) {
+ if o.Has(Chmod) {
b.WriteString("|CHMOD")
}
if b.Len() == 0 {
@@ -70,7 +93,7 @@ func (op Op) String() string {
}
// Has reports if this operation has the given operation.
-func (o Op) Has(h Op) bool { return o&h == h }
+func (o Op) Has(h Op) bool { return o&h != 0 }
// Has reports if this event has the given operation.
func (e Event) Has(op Op) bool { return e.Op.Has(op) }
@@ -79,3 +102,45 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) }
func (e Event) String() string {
return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
}
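// A small sketch of the helpers above; note that with the new definition Has
// reports whether any of the given bits are set (the file name is an
// assumption):
//
//	ev := fsnotify.Event{Name: "file.txt", Op: fsnotify.Create | fsnotify.Write}
//	ev.Has(fsnotify.Write)                     // true
//	ev.Has(fsnotify.Remove)                    // false
//	ev.Op.Has(fsnotify.Write | fsnotify.Chmod) // true: Write is set, Chmod is not
//	fmt.Println(ev)                            // prints something like: CREATE|WRITE  "file.txt"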
+
+type (
+ addOpt func(opt *withOpts)
+ withOpts struct {
+ bufsize int
+ }
+)
+
+var defaultOpts = withOpts{
+ bufsize: 65536, // 64K
+}
+
+func getOptions(opts ...addOpt) withOpts {
+ with := defaultOpts
+ for _, o := range opts {
+ o(&with)
+ }
+ return with
+}
+
+// WithBufferSize sets the [ReadDirectoryChangesW] buffer size.
+//
+// This only has effect on Windows systems, and is a no-op for other backends.
+//
+// The default value is 64K (65536 bytes) which is the highest value that works
+// on all filesystems and should be enough for most applications, but if you
+// have a large burst of events it may not be enough. You can increase it if
+// you're hitting "queue or buffer overflow" errors ([ErrEventOverflow]).
+//
+// [ReadDirectoryChangesW]: https://learn.microsoft.com/en-gb/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
+func WithBufferSize(bytes int) addOpt {
+ return func(opt *withOpts) { opt.bufsize = bytes }
+}
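// A usage sketch for WithBufferSize (illustrative; the size and path are
// assumptions):
//
//	// Use a 256K ReadDirectoryChangesW buffer instead of the default 64K.
//	err := w.AddWith(`C:\some\dir`, fsnotify.WithBufferSize(256*1024))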
+
+// Check if this path is recursive (ends with "/..." or "\..."), and return the
+// path with the /... stripped.
+func recursivePath(path string) (string, bool) {
+ if filepath.Base(path) == "..." {
+ return filepath.Dir(path), true
+ }
+ return path, false
+}
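// For example (a sketch of the helper above):
//
//	recursivePath("/tmp/dir/...") // ("/tmp/dir", true)
//	recursivePath("/tmp/dir")     // ("/tmp/dir", false)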
diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh
index b09ef768..99012ae6 100644
--- a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh
+++ b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh
@@ -2,8 +2,8 @@
[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1
setopt err_exit no_unset pipefail extended_glob
-# Simple script to update the godoc comments on all watchers. Probably took me
-# more time to write this than doing it manually, but ah well 🙃
+# Simple script to update the godoc comments on all watchers so you don't need
+# to update the same comment 5 times.
watcher=$(< 0 {
_, err := io.ReadFull(reader, content)
if err != nil {
- if err == io.EOF {
- return nil, read, io.ErrUnexpectedEOF
- }
- return nil, read, err
+ return nil, read, unexpectedEOF(err)
}
read += length
}
diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/identifier.go b/vendor/github.com/go-asn1-ber/asn1-ber/identifier.go
index e8c43574..c501d819 100644
--- a/vendor/github.com/go-asn1-ber/asn1-ber/identifier.go
+++ b/vendor/github.com/go-asn1-ber/asn1-ber/identifier.go
@@ -37,7 +37,7 @@ func readIdentifier(reader io.Reader) (Identifier, int, error) {
if Debug {
fmt.Printf("error reading high-tag-number tag byte %d: %v\n", tagBytes, err)
}
- return Identifier{}, read, err
+ return Identifier{}, read, unexpectedEOF(err)
}
tagBytes++
read++
diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/length.go b/vendor/github.com/go-asn1-ber/asn1-ber/length.go
index 9cc195d0..2c81cc3f 100644
--- a/vendor/github.com/go-asn1-ber/asn1-ber/length.go
+++ b/vendor/github.com/go-asn1-ber/asn1-ber/length.go
@@ -13,7 +13,7 @@ func readLength(reader io.Reader) (length int, read int, err error) {
if Debug {
fmt.Printf("error reading length byte: %v\n", err)
}
- return 0, 0, err
+ return 0, 0, unexpectedEOF(err)
}
read++
@@ -47,7 +47,7 @@ func readLength(reader io.Reader) (length int, read int, err error) {
if Debug {
fmt.Printf("error reading long-form length byte %d: %v\n", i, err)
}
- return 0, read, err
+ return 0, read, unexpectedEOF(err)
}
read++
diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/real.go b/vendor/github.com/go-asn1-ber/asn1-ber/real.go
index 610a003a..9f637a57 100644
--- a/vendor/github.com/go-asn1-ber/asn1-ber/real.go
+++ b/vendor/github.com/go-asn1-ber/asn1-ber/real.go
@@ -89,12 +89,18 @@ func parseBinaryFloat(v []byte) (float64, error) {
case 0x02:
expLen = 3
case 0x03:
+ if len(v) < 2 {
+ return 0.0, errors.New("invalid data")
+ }
expLen = int(v[0])
if expLen > 8 {
return 0.0, errors.New("too big value of exponent")
}
v = v[1:]
}
+ if expLen > len(v) {
+ return 0.0, errors.New("too big value of exponent")
+ }
buf, v = v[:expLen], v[expLen:]
exponent, err := ParseInt64(buf)
if err != nil {
diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/util.go b/vendor/github.com/go-asn1-ber/asn1-ber/util.go
index 14dc87d7..da45e9fa 100644
--- a/vendor/github.com/go-asn1-ber/asn1-ber/util.go
+++ b/vendor/github.com/go-asn1-ber/asn1-ber/util.go
@@ -6,14 +6,18 @@ func readByte(reader io.Reader) (byte, error) {
bytes := make([]byte, 1)
_, err := io.ReadFull(reader, bytes)
if err != nil {
- if err == io.EOF {
- return 0, io.ErrUnexpectedEOF
- }
return 0, err
}
return bytes[0], nil
}
+func unexpectedEOF(err error) error {
+ if err == io.EOF {
+ return io.ErrUnexpectedEOF
+ }
+ return err
+}
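// For example (a sketch of the helper above):
//
//	unexpectedEOF(io.EOF)           // io.ErrUnexpectedEOF
//	unexpectedEOF(io.ErrShortWrite) // any other error is passed through unchanged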
+
func isEOCPacket(p *Packet) bool {
return p != nil &&
p.Tag == TagEOC &&
diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
new file mode 100644
index 00000000..15167cd7
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
new file mode 100644
index 00000000..1c4577e9
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 00000000..0f646931
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,28 @@
+Copyright 2010 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go
new file mode 100644
index 00000000..e810e6fe
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/buffer.go
@@ -0,0 +1,324 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ WireVarint = 0
+ WireFixed32 = 5
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+)
+
+// EncodeVarint returns the varint encoded bytes of v.
+func EncodeVarint(v uint64) []byte {
+ return protowire.AppendVarint(nil, v)
+}
+
+// SizeVarint returns the length of the varint encoded bytes of v.
+// This is equal to len(EncodeVarint(v)).
+func SizeVarint(v uint64) int {
+ return protowire.SizeVarint(v)
+}
+
+// DecodeVarint parses a varint encoded integer from b,
+// returning the integer value and the length of the varint.
+// It returns (0, 0) if there is a parse error.
+func DecodeVarint(b []byte) (uint64, int) {
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, 0
+ }
+ return v, n
+}
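// A worked example for the varint helpers above (this sketch is illustrative,
// not part of the diff):
//
//	b := proto.EncodeVarint(300)  // []byte{0xac, 0x02}
//	v, n := proto.DecodeVarint(b) // v == 300, n == 2
//	_ = proto.SizeVarint(300)     // 2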
+
+// Buffer is a buffer for encoding and decoding the protobuf wire format.
+// It may be reused between invocations to reduce memory usage.
+type Buffer struct {
+ buf []byte
+ idx int
+ deterministic bool
+}
+
+// NewBuffer allocates a new Buffer initialized with buf,
+// where the contents of buf are considered the unread portion of the buffer.
+func NewBuffer(buf []byte) *Buffer {
+ return &Buffer{buf: buf}
+}
+
+// SetDeterministic specifies whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+// - Repeated serialization of a message will return the same bytes.
+// - Different processes of the same binary (which may be executing on
+// different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographical order. This is an implementation detail and
+// subject to change.
+func (b *Buffer) SetDeterministic(deterministic bool) {
+ b.deterministic = deterministic
+}
+
+// SetBuf sets buf as the internal buffer,
+// where the contents of buf are considered the unread portion of the buffer.
+func (b *Buffer) SetBuf(buf []byte) {
+ b.buf = buf
+ b.idx = 0
+}
+
+// Reset clears the internal buffer of all written and unread data.
+func (b *Buffer) Reset() {
+ b.buf = b.buf[:0]
+ b.idx = 0
+}
+
+// Bytes returns the internal buffer.
+func (b *Buffer) Bytes() []byte {
+ return b.buf
+}
+
+// Unread returns the unread portion of the buffer.
+func (b *Buffer) Unread() []byte {
+ return b.buf[b.idx:]
+}
+
+// Marshal appends the wire-format encoding of m to the buffer.
+func (b *Buffer) Marshal(m Message) error {
+ var err error
+ b.buf, err = marshalAppend(b.buf, m, b.deterministic)
+ return err
+}
+
+// Unmarshal parses the wire-format message in the buffer and
+// places the decoded results in m.
+// It does not reset m before unmarshaling.
+func (b *Buffer) Unmarshal(m Message) error {
+ err := UnmarshalMerge(b.Unread(), m)
+ b.idx = len(b.buf)
+ return err
+}
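// A minimal sketch of reusing a Buffer for encoding and decoding (msg and the
// *pb.Example type are placeholder assumptions):
//
//	buf := proto.NewBuffer(nil)
//	if err := buf.Marshal(msg); err != nil {
//		log.Fatal(err)
//	}
//	wire := buf.Bytes()
//
//	out := new(pb.Example)
//	if err := proto.NewBuffer(wire).Unmarshal(out); err != nil {
//		log.Fatal(err)
//	}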
+
+type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
+
+func (m *unknownFields) String() string { panic("not implemented") }
+func (m *unknownFields) Reset() { panic("not implemented") }
+func (m *unknownFields) ProtoMessage() { panic("not implemented") }
+
+// DebugPrint dumps the encoded bytes of b with a header and footer including s
+// to stdout. This is only intended for debugging.
+func (*Buffer) DebugPrint(s string, b []byte) {
+ m := MessageReflect(new(unknownFields))
+ m.SetUnknown(b)
+ b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
+ fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
+}
+
+// EncodeVarint appends an unsigned varint encoding to the buffer.
+func (b *Buffer) EncodeVarint(v uint64) error {
+ b.buf = protowire.AppendVarint(b.buf, v)
+ return nil
+}
+
+// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
+func (b *Buffer) EncodeZigzag32(v uint64) error {
+ return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+}
+
+// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
+func (b *Buffer) EncodeZigzag64(v uint64) error {
+ return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
+}
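// Zig-zag encoding maps signed values to small unsigned varints, per the
// formulas above:
//
//	 0 -> 0
//	-1 -> 1
//	 1 -> 2
//	-2 -> 3
//	 2 -> 4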
+
+// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
+func (b *Buffer) EncodeFixed32(v uint64) error {
+ b.buf = protowire.AppendFixed32(b.buf, uint32(v))
+ return nil
+}
+
+// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
+func (b *Buffer) EncodeFixed64(v uint64) error {
+ b.buf = protowire.AppendFixed64(b.buf, uint64(v))
+ return nil
+}
+
+// EncodeRawBytes appends length-prefixed raw bytes to the buffer.
+func (b *Buffer) EncodeRawBytes(v []byte) error {
+ b.buf = protowire.AppendBytes(b.buf, v)
+ return nil
+}
+
+// EncodeStringBytes appends length-prefixed raw bytes to the buffer.
+// It does not validate whether v contains valid UTF-8.
+func (b *Buffer) EncodeStringBytes(v string) error {
+ b.buf = protowire.AppendString(b.buf, v)
+ return nil
+}
+
+// EncodeMessage appends a length-prefixed encoded message to the buffer.
+func (b *Buffer) EncodeMessage(m Message) error {
+ var err error
+ b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
+ b.buf, err = marshalAppend(b.buf, m, b.deterministic)
+ return err
+}
+
+// DecodeVarint consumes an encoded unsigned varint from the buffer.
+func (b *Buffer) DecodeVarint() (uint64, error) {
+ v, n := protowire.ConsumeVarint(b.buf[b.idx:])
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ b.idx += n
+ return uint64(v), nil
+}
+
+// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
+func (b *Buffer) DecodeZigzag32() (uint64, error) {
+ v, err := b.DecodeVarint()
+ if err != nil {
+ return 0, err
+ }
+ return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
+}
+
+// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
+func (b *Buffer) DecodeZigzag64() (uint64, error) {
+ v, err := b.DecodeVarint()
+ if err != nil {
+ return 0, err
+ }
+ return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
+}
+
+// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
+func (b *Buffer) DecodeFixed32() (uint64, error) {
+ v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ b.idx += n
+ return uint64(v), nil
+}
+
+// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
+func (b *Buffer) DecodeFixed64() (uint64, error) {
+ v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ b.idx += n
+ return uint64(v), nil
+}
+
+// DecodeRawBytes consumes length-prefixed raw bytes from the buffer.
+// If alloc is specified, it returns a copy of the raw bytes
+// rather than a sub-slice of the buffer.
+func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
+ v, n := protowire.ConsumeBytes(b.buf[b.idx:])
+ if n < 0 {
+ return nil, protowire.ParseError(n)
+ }
+ b.idx += n
+ if alloc {
+ v = append([]byte(nil), v...)
+ }
+ return v, nil
+}
+
+// DecodeStringBytes consumes length-prefixed raw bytes from the buffer.
+// It does not validate whether the raw bytes contain valid UTF-8.
+func (b *Buffer) DecodeStringBytes() (string, error) {
+ v, n := protowire.ConsumeString(b.buf[b.idx:])
+ if n < 0 {
+ return "", protowire.ParseError(n)
+ }
+ b.idx += n
+ return v, nil
+}
+
+// DecodeMessage consumes a length-prefixed message from the buffer.
+// It does not reset m before unmarshaling.
+func (b *Buffer) DecodeMessage(m Message) error {
+ v, err := b.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return UnmarshalMerge(v, m)
+}
+
+// DecodeGroup consumes a message group from the buffer.
+// It assumes that the start group marker has already been consumed and
+// consumes all bytes until (and including) the end group marker.
+// It does not reset m before unmarshaling.
+func (b *Buffer) DecodeGroup(m Message) error {
+ v, n, err := consumeGroup(b.buf[b.idx:])
+ if err != nil {
+ return err
+ }
+ b.idx += n
+ return UnmarshalMerge(v, m)
+}
+
+// consumeGroup parses b until it finds an end group marker, returning
+// the raw bytes of the message (excluding the end group marker) and
+// the total length of the message (including the end group marker).
+func consumeGroup(b []byte) ([]byte, int, error) {
+ b0 := b
+ depth := 1 // assume this follows a start group marker
+ for {
+ _, wtyp, tagLen := protowire.ConsumeTag(b)
+ if tagLen < 0 {
+ return nil, 0, protowire.ParseError(tagLen)
+ }
+ b = b[tagLen:]
+
+ var valLen int
+ switch wtyp {
+ case protowire.VarintType:
+ _, valLen = protowire.ConsumeVarint(b)
+ case protowire.Fixed32Type:
+ _, valLen = protowire.ConsumeFixed32(b)
+ case protowire.Fixed64Type:
+ _, valLen = protowire.ConsumeFixed64(b)
+ case protowire.BytesType:
+ _, valLen = protowire.ConsumeBytes(b)
+ case protowire.StartGroupType:
+ depth++
+ case protowire.EndGroupType:
+ depth--
+ default:
+ return nil, 0, errors.New("proto: cannot parse reserved wire type")
+ }
+ if valLen < 0 {
+ return nil, 0, protowire.ParseError(valLen)
+ }
+ b = b[valLen:]
+
+ if depth == 0 {
+ return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
+ }
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go
new file mode 100644
index 00000000..d399bf06
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/defaults.go
@@ -0,0 +1,63 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// SetDefaults sets unpopulated scalar fields to their default values.
+// Fields within a oneof are not set even if they have a default value.
+// SetDefaults is recursively called upon any populated message fields.
+func SetDefaults(m Message) {
+ if m != nil {
+ setDefaults(MessageReflect(m))
+ }
+}
+
+func setDefaults(m protoreflect.Message) {
+ fds := m.Descriptor().Fields()
+ for i := 0; i < fds.Len(); i++ {
+ fd := fds.Get(i)
+ if !m.Has(fd) {
+ if fd.HasDefault() && fd.ContainingOneof() == nil {
+ v := fd.Default()
+ if fd.Kind() == protoreflect.BytesKind {
+ v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
+ }
+ m.Set(fd, v)
+ }
+ continue
+ }
+ }
+
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ switch {
+ // Handle singular message.
+ case fd.Cardinality() != protoreflect.Repeated:
+ if fd.Message() != nil {
+ setDefaults(m.Get(fd).Message())
+ }
+ // Handle list of messages.
+ case fd.IsList():
+ if fd.Message() != nil {
+ ls := m.Get(fd).List()
+ for i := 0; i < ls.Len(); i++ {
+ setDefaults(ls.Get(i).Message())
+ }
+ }
+ // Handle map of messages.
+ case fd.IsMap():
+ if fd.MapValue().Message() != nil {
+ ms := m.Get(fd).Map()
+ ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
+ setDefaults(v.Message())
+ return true
+ })
+ }
+ }
+ return true
+ })
+}
diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go
new file mode 100644
index 00000000..e8db57e0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/deprecated.go
@@ -0,0 +1,113 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strconv"
+
+ protoV2 "google.golang.org/protobuf/proto"
+)
+
+var (
+ // Deprecated: No longer returned.
+ ErrNil = errors.New("proto: Marshal called with nil")
+
+ // Deprecated: No longer returned.
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+
+ // Deprecated: No longer returned.
+ ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+)
+
+// Deprecated: Do not use.
+type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
+
+// Deprecated: Do not use.
+func GetStats() Stats { return Stats{} }
+
+// Deprecated: Do not use.
+func MarshalMessageSet(interface{}) ([]byte, error) {
+ return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: Do not use.
+func UnmarshalMessageSet([]byte, interface{}) error {
+ return errors.New("proto: not implemented")
+}
+
+// Deprecated: Do not use.
+func MarshalMessageSetJSON(interface{}) ([]byte, error) {
+ return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: Do not use.
+func UnmarshalMessageSetJSON([]byte, interface{}) error {
+ return errors.New("proto: not implemented")
+}
+
+// Deprecated: Do not use.
+func RegisterMessageSetType(Message, int32, string) {}
+
+// Deprecated: Do not use.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// Deprecated: Do not use.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
+
+// Deprecated: Do not use; this type existed for internal-use only.
+type InternalMessageInfo struct{}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) DiscardUnknown(m Message) {
+ DiscardUnknown(m)
+}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
+ return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
+}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) Merge(dst, src Message) {
+ protoV2.Merge(MessageV2(dst), MessageV2(src))
+}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) Size(m Message) int {
+ return protoV2.Size(MessageV2(m))
+}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
+ return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
new file mode 100644
index 00000000..2187e877
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/discard.go
@@ -0,0 +1,58 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// DiscardUnknown recursively discards all unknown fields from this message
+// and all embedded messages.
+//
+// When unmarshaling a message with unrecognized fields, the tags and values
+// of such fields are preserved in the Message. This allows a later call to
+// marshal to be able to produce a message that continues to have those
+// unrecognized fields. To avoid this, DiscardUnknown is used to
+// explicitly clear the unknown fields after unmarshaling.
+func DiscardUnknown(m Message) {
+ if m != nil {
+ discardUnknown(MessageReflect(m))
+ }
+}
+
+func discardUnknown(m protoreflect.Message) {
+ m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
+ switch {
+ // Handle singular message.
+ case fd.Cardinality() != protoreflect.Repeated:
+ if fd.Message() != nil {
+ discardUnknown(m.Get(fd).Message())
+ }
+ // Handle list of messages.
+ case fd.IsList():
+ if fd.Message() != nil {
+ ls := m.Get(fd).List()
+ for i := 0; i < ls.Len(); i++ {
+ discardUnknown(ls.Get(i).Message())
+ }
+ }
+ // Handle map of messages.
+ case fd.IsMap():
+ if fd.MapValue().Message() != nil {
+ ms := m.Get(fd).Map()
+ ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
+ discardUnknown(v.Message())
+ return true
+ })
+ }
+ }
+ return true
+ })
+
+ // Discard unknown fields.
+ if len(m.GetUnknown()) > 0 {
+ m.SetUnknown(nil)
+ }
+}
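// A small usage sketch (wire and msg are placeholder assumptions):
//
//	if err := proto.Unmarshal(wire, msg); err != nil {
//		log.Fatal(err)
//	}
//	proto.DiscardUnknown(msg) // drop any unrecognized fields before re-marshaling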
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 00000000..42fc120c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,356 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+type (
+ // ExtensionDesc represents an extension descriptor and
+ // is used to interact with an extension field in a message.
+ //
+ // Variables of this type are generated in code by protoc-gen-go.
+ ExtensionDesc = protoimpl.ExtensionInfo
+
+ // ExtensionRange represents a range of message extensions.
+ // Used in code generated by protoc-gen-go.
+ ExtensionRange = protoiface.ExtensionRangeV1
+
+ // Deprecated: Do not use; this is an internal type.
+ Extension = protoimpl.ExtensionFieldV1
+
+ // Deprecated: Do not use; this is an internal type.
+ XXX_InternalExtensions = protoimpl.ExtensionFields
+)
+
+// ErrMissingExtension is returned by GetExtension when the extension is not present.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
+
+// HasExtension reports whether the extension field is present in m
+// either as an explicitly populated field or as an unknown field.
+func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return false
+ }
+
+ // Check whether any populated known field matches the field number.
+ xtd := xt.TypeDescriptor()
+ if isValidExtension(mr.Descriptor(), xtd) {
+ has = mr.Has(xtd)
+ } else {
+ mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ has = int32(fd.Number()) == xt.Field
+ return !has
+ })
+ }
+
+ // Check whether any unknown field matches the field number.
+ for b := mr.GetUnknown(); !has && len(b) > 0; {
+ num, _, n := protowire.ConsumeField(b)
+ has = int32(num) == xt.Field
+ b = b[n:]
+ }
+ return has
+}
+
+// ClearExtension removes the extension field from m
+// either as an explicitly populated field or as an unknown field.
+func ClearExtension(m Message, xt *ExtensionDesc) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return
+ }
+
+ xtd := xt.TypeDescriptor()
+ if isValidExtension(mr.Descriptor(), xtd) {
+ mr.Clear(xtd)
+ } else {
+ mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ if int32(fd.Number()) == xt.Field {
+ mr.Clear(fd)
+ return false
+ }
+ return true
+ })
+ }
+ clearUnknown(mr, fieldNum(xt.Field))
+}
+
+// ClearAllExtensions clears all extensions from m.
+// This includes populated fields and unknown fields in the extension range.
+func ClearAllExtensions(m Message) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return
+ }
+
+ mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ if fd.IsExtension() {
+ mr.Clear(fd)
+ }
+ return true
+ })
+ clearUnknown(mr, mr.Descriptor().ExtensionRanges())
+}
+
+// GetExtension retrieves a proto2 extended field from m.
+//
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
+// If the field is not present, then the default value is returned (if one is specified),
+// otherwise ErrMissingExtension is reported.
+//
+// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes for the extension field.
+func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+ return nil, errNotExtendable
+ }
+
+ // Retrieve the unknown fields for this extension field.
+ var bo protoreflect.RawFields
+ for bi := mr.GetUnknown(); len(bi) > 0; {
+ num, _, n := protowire.ConsumeField(bi)
+ if int32(num) == xt.Field {
+ bo = append(bo, bi[:n]...)
+ }
+ bi = bi[n:]
+ }
+
+ // For type incomplete descriptors, only retrieve the unknown fields.
+ if xt.ExtensionType == nil {
+ return []byte(bo), nil
+ }
+
+ // If the extension field only exists as unknown fields, unmarshal it.
+ // This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
+ xtd := xt.TypeDescriptor()
+ if !isValidExtension(mr.Descriptor(), xtd) {
+ return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
+ }
+ if !mr.Has(xtd) && len(bo) > 0 {
+ m2 := mr.New()
+ if err := (proto.UnmarshalOptions{
+ Resolver: extensionResolver{xt},
+ }.Unmarshal(bo, m2.Interface())); err != nil {
+ return nil, err
+ }
+ if m2.Has(xtd) {
+ mr.Set(xtd, m2.Get(xtd))
+ clearUnknown(mr, fieldNum(xt.Field))
+ }
+ }
+
+ // Check whether the message has the extension field set or a default.
+ var pv protoreflect.Value
+ switch {
+ case mr.Has(xtd):
+ pv = mr.Get(xtd)
+ case xtd.HasDefault():
+ pv = xtd.Default()
+ default:
+ return nil, ErrMissingExtension
+ }
+
+ v := xt.InterfaceOf(pv)
+ rv := reflect.ValueOf(v)
+ if isScalarKind(rv.Kind()) {
+ rv2 := reflect.New(rv.Type())
+ rv2.Elem().Set(rv)
+ v = rv2.Interface()
+ }
+ return v, nil
+}
+
+// extensionResolver is a custom extension resolver that stores a single
+// extension type that takes precedence over the global registry.
+type extensionResolver struct{ xt protoreflect.ExtensionType }
+
+func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
+ if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
+ return r.xt, nil
+ }
+ return protoregistry.GlobalTypes.FindExtensionByName(field)
+}
+
+func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+ if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
+ return r.xt, nil
+ }
+ return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
+}
+
+// GetExtensions returns a list of the extension values present in m,
+// corresponding with the provided list of extension descriptors, xts.
+// If an extension is missing in m, the corresponding value is nil.
+func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return nil, errNotExtendable
+ }
+
+ vs := make([]interface{}, len(xts))
+ for i, xt := range xts {
+ v, err := GetExtension(m, xt)
+ if err != nil {
+ if err == ErrMissingExtension {
+ continue
+ }
+ return vs, err
+ }
+ vs[i] = v
+ }
+ return vs, nil
+}
+
+// SetExtension sets an extension field in m to the provided value.
+func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+ return errNotExtendable
+ }
+
+ rv := reflect.ValueOf(v)
+ if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
+ return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
+ }
+ if rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
+ }
+ if isScalarKind(rv.Elem().Kind()) {
+ v = rv.Elem().Interface()
+ }
+ }
+
+ xtd := xt.TypeDescriptor()
+ if !isValidExtension(mr.Descriptor(), xtd) {
+ return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
+ }
+ mr.Set(xtd, xt.ValueOf(v))
+ clearUnknown(mr, fieldNum(xt.Field))
+ return nil
+}
+
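// A usage sketch for the extension accessors above (msg is assumed to be a
// proto2 message with an extension range, and pb.E_MyExtension is a
// placeholder string extension):
//
//	if err := proto.SetExtension(msg, pb.E_MyExtension, proto.String("hello")); err != nil {
//		log.Fatal(err)
//	}
//	if proto.HasExtension(msg, pb.E_MyExtension) {
//		v, err := proto.GetExtension(msg, pb.E_MyExtension)
//		if err == nil {
//			fmt.Println(*(v.(*string)))
//		}
//	}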
+// SetRawExtension inserts b into the unknown fields of m.
+//
+// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
+func SetRawExtension(m Message, fnum int32, b []byte) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return
+ }
+
+ // Verify that the raw field is valid.
+ for b0 := b; len(b0) > 0; {
+ num, _, n := protowire.ConsumeField(b0)
+ if int32(num) != fnum {
+ panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
+ }
+ b0 = b0[n:]
+ }
+
+ ClearExtension(m, &ExtensionDesc{Field: fnum})
+ mr.SetUnknown(append(mr.GetUnknown(), b...))
+}
+
+// ExtensionDescs returns a list of extension descriptors found in m,
+// containing descriptors for both populated extension fields in m and
+// also unknown fields of m that are in the extension range.
+// For the latter case, a type-incomplete descriptor is provided where only
+// the ExtensionDesc.Field field is populated.
+// The order of the extension descriptors is undefined.
+func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+ return nil, errNotExtendable
+ }
+
+ // Collect a set of known extension descriptors.
+ extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
+ mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ if fd.IsExtension() {
+ xt := fd.(protoreflect.ExtensionTypeDescriptor)
+ if xd, ok := xt.Type().(*ExtensionDesc); ok {
+ extDescs[fd.Number()] = xd
+ }
+ }
+ return true
+ })
+
+ // Collect a set of unknown extension descriptors.
+ extRanges := mr.Descriptor().ExtensionRanges()
+ for b := mr.GetUnknown(); len(b) > 0; {
+ num, _, n := protowire.ConsumeField(b)
+ if extRanges.Has(num) && extDescs[num] == nil {
+ extDescs[num] = nil
+ }
+ b = b[n:]
+ }
+
+ // Transpose the set of descriptors into a list.
+ var xts []*ExtensionDesc
+ for num, xt := range extDescs {
+ if xt == nil {
+ xt = &ExtensionDesc{Field: int32(num)}
+ }
+ xts = append(xts, xt)
+ }
+ return xts, nil
+}
+
+// isValidExtension reports whether xtd is a valid extension descriptor for md.
+func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
+ return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
+}
+
+// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
+// This function exists for historical reasons since the representation of
+// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
+func isScalarKind(k reflect.Kind) bool {
+ switch k {
+ case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+ return true
+ default:
+ return false
+ }
+}
+
+// clearUnknown removes unknown fields from m where remover.Has reports true.
+func clearUnknown(m protoreflect.Message, remover interface {
+ Has(protoreflect.FieldNumber) bool
+}) {
+ var bo protoreflect.RawFields
+ for bi := m.GetUnknown(); len(bi) > 0; {
+ num, _, n := protowire.ConsumeField(bi)
+ if !remover.Has(num) {
+ bo = append(bo, bi[:n]...)
+ }
+ bi = bi[n:]
+ }
+ if bi := m.GetUnknown(); len(bi) != len(bo) {
+ m.SetUnknown(bo)
+ }
+}
+
+type fieldNum protoreflect.FieldNumber
+
+func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
+ return protoreflect.FieldNumber(n1) == n2
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 00000000..dcdc2202
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,306 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// StructProperties represents protocol buffer type information for a
+// generated protobuf message in the open-struct API.
+//
+// Deprecated: Do not use.
+type StructProperties struct {
+ // Prop are the properties for each field.
+ //
+ // Fields belonging to a oneof are stored in OneofTypes instead, with a
+ // single Properties representing the parent oneof held here.
+ //
+ // The order of Prop matches the order of fields in the Go struct.
+ // Struct fields that are not related to protobufs have a "XXX_" prefix
+ // in the Properties.Name and must be ignored by the user.
+ Prop []*Properties
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the protobuf field name.
+ OneofTypes map[string]*OneofProperties
+}
+
+// Properties represents the type information for a protobuf message field.
+//
+// Deprecated: Do not use.
+type Properties struct {
+ // Name is a placeholder name with little meaningful semantic value.
+ // If the name has an "XXX_" prefix, the entire Properties must be ignored.
+ Name string
+ // OrigName is the protobuf field name or oneof name.
+ OrigName string
+ // JSONName is the JSON name for the protobuf field.
+ JSONName string
+ // Enum is a placeholder name for enums.
+ // For historical reasons, this is neither the Go name for the enum,
+ // nor the protobuf name for the enum.
+ Enum string // Deprecated: Do not use.
+ // Weak contains the full name of the weakly referenced message.
+ Weak string
+ // Wire is a string representation of the wire type.
+ Wire string
+ // WireType is the protobuf wire type for the field.
+ WireType int
+ // Tag is the protobuf field number.
+ Tag int
+ // Required reports whether this is a required field.
+ Required bool
+ // Optional reports whether this is an optional field.
+ Optional bool
+ // Repeated reports whether this is a repeated field.
+ Repeated bool
+ // Packed reports whether this is a packed repeated field of scalars.
+ Packed bool
+ // Proto3 reports whether this field operates under the proto3 syntax.
+ Proto3 bool
+ // Oneof reports whether this field belongs within a oneof.
+ Oneof bool
+
+ // Default is the default value in string form.
+ Default string
+ // HasDefault reports whether the field has a default value.
+ HasDefault bool
+
+ // MapKeyProp is the properties for the key field for a map field.
+ MapKeyProp *Properties
+ // MapValProp is the properties for the value field for a map field.
+ MapValProp *Properties
+}
+
+// OneofProperties represents the type information for a protobuf oneof.
+//
+// Deprecated: Do not use.
+type OneofProperties struct {
+ // Type is a pointer to the generated wrapper type for the field value.
+ // This is nil for messages that are not in the open-struct API.
+ Type reflect.Type
+ // Field is the index into StructProperties.Prop for the containing oneof.
+ Field int
+ // Prop is the properties for the field.
+ Prop *Properties
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+ s += "," + strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ s += ",name=" + p.OrigName
+ if p.JSONName != "" {
+ s += ",json=" + p.JSONName
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if len(p.Weak) > 0 {
+ s += ",weak=" + p.Weak
+ }
+ if p.Proto3 {
+ s += ",proto3"
+ }
+ if p.Oneof {
+ s += ",oneof"
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(tag string) {
+ // For example: "bytes,49,opt,name=foo,def=hello!"
+ for len(tag) > 0 {
+ i := strings.IndexByte(tag, ',')
+ if i < 0 {
+ i = len(tag)
+ }
+ switch s := tag[:i]; {
+ case strings.HasPrefix(s, "name="):
+ p.OrigName = s[len("name="):]
+ case strings.HasPrefix(s, "json="):
+ p.JSONName = s[len("json="):]
+ case strings.HasPrefix(s, "enum="):
+ p.Enum = s[len("enum="):]
+ case strings.HasPrefix(s, "weak="):
+ p.Weak = s[len("weak="):]
+ case strings.Trim(s, "0123456789") == "":
+ n, _ := strconv.ParseUint(s, 10, 32)
+ p.Tag = int(n)
+ case s == "opt":
+ p.Optional = true
+ case s == "req":
+ p.Required = true
+ case s == "rep":
+ p.Repeated = true
+ case s == "varint" || s == "zigzag32" || s == "zigzag64":
+ p.Wire = s
+ p.WireType = WireVarint
+ case s == "fixed32":
+ p.Wire = s
+ p.WireType = WireFixed32
+ case s == "fixed64":
+ p.Wire = s
+ p.WireType = WireFixed64
+ case s == "bytes":
+ p.Wire = s
+ p.WireType = WireBytes
+ case s == "group":
+ p.Wire = s
+ p.WireType = WireStartGroup
+ case s == "packed":
+ p.Packed = true
+ case s == "proto3":
+ p.Proto3 = true
+ case s == "oneof":
+ p.Oneof = true
+ case strings.HasPrefix(s, "def="):
+ // The default tag is special in that everything afterwards is the
+ // default regardless of the presence of commas.
+ p.HasDefault = true
+ p.Default, i = tag[len("def="):], len(tag)
+ }
+ tag = strings.TrimPrefix(tag[i:], ",")
+ }
+}
+
+// Init populates the properties from a protocol buffer struct tag.
+//
+// Deprecated: Do not use.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.Name = name
+ p.OrigName = name
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+
+ if typ != nil && typ.Kind() == reflect.Map {
+ p.MapKeyProp = new(Properties)
+ p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
+ p.MapValProp = new(Properties)
+ p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
+ }
+}
+
+var propertiesCache sync.Map // map[reflect.Type]*StructProperties
+
+// GetProperties returns the list of properties for the type represented by t,
+// which must be a generated protocol buffer message in the open-struct API,
+// where protobuf message fields are represented by exported Go struct fields.
+//
+// Deprecated: Use protobuf reflection instead.
+func GetProperties(t reflect.Type) *StructProperties {
+ if p, ok := propertiesCache.Load(t); ok {
+ return p.(*StructProperties)
+ }
+ p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
+ return p.(*StructProperties)
+}
+
+func newProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
+ }
+
+ var hasOneof bool
+ prop := new(StructProperties)
+
+ // Construct a list of properties for each field in the struct.
+ for i := 0; i < t.NumField(); i++ {
+ p := new(Properties)
+ f := t.Field(i)
+ tagField := f.Tag.Get("protobuf")
+ p.Init(f.Type, f.Name, tagField, &f)
+
+ tagOneof := f.Tag.Get("protobuf_oneof")
+ if tagOneof != "" {
+ hasOneof = true
+ p.OrigName = tagOneof
+ }
+
+ // Rename unrelated struct fields with the "XXX_" prefix since so much
+ // user code simply checks for this to exclude special fields.
+ if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
+ p.Name = "XXX_" + p.Name
+ p.OrigName = "XXX_" + p.OrigName
+ } else if p.Weak != "" {
+ p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
+ }
+
+ prop.Prop = append(prop.Prop, p)
+ }
+
+ // Construct a mapping of oneof field names to properties.
+ if hasOneof {
+ var oneofWrappers []interface{}
+ if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
+ oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
+ }
+ if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
+ oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
+ }
+ if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
+ if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
+ oneofWrappers = m.ProtoMessageInfo().OneofWrappers
+ }
+ }
+
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, wrapper := range oneofWrappers {
+ p := &OneofProperties{
+ Type: reflect.ValueOf(wrapper).Type(), // *T
+ Prop: new(Properties),
+ }
+ f := p.Type.Elem().Field(0)
+ p.Prop.Name = f.Name
+ p.Prop.Parse(f.Tag.Get("protobuf"))
+
+ // Determine the struct field that contains this oneof.
+ // Each wrapper is assignable to exactly one parent field.
+ var foundOneof bool
+ for i := 0; i < t.NumField() && !foundOneof; i++ {
+ if p.Type.AssignableTo(t.Field(i).Type) {
+ p.Field = i
+ foundOneof = true
+ }
+ }
+ if !foundOneof {
+ panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
+ }
+ prop.OneofTypes[p.Prop.OrigName] = p
+ }
+ }
+
+ return prop
+}
+
+func (sp *StructProperties) Len() int { return len(sp.Prop) }
+func (sp *StructProperties) Less(i, j int) bool { return false }
+func (sp *StructProperties) Swap(i, j int) { return }
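(Illustrative aside, not part of the vendored patch.) The deprecated Properties API above can still be driven by hand: Parse fills a Properties value from a struct-tag string in the format emitted by protoc-gen-go, and String reproduces that tag. A minimal sketch, assuming the vendored package is importable as github.com/golang/protobuf/proto; the tag value itself is made up for illustration:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// A struct-tag value in the style generated code emits, e.g.
	// `protobuf:"bytes,1,opt,name=login,json=login,proto3"`.
	p := new(proto.Properties)
	p.Parse("bytes,1,opt,name=login,json=login,proto3")

	fmt.Println(p.Wire, p.Tag, p.OrigName, p.JSONName, p.Proto3) // bytes 1 login login true
	fmt.Println(p.String())                                      // bytes,1,opt,name=login,json=login,proto3
}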
diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go
new file mode 100644
index 00000000..5aee89c3
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/proto.go
@@ -0,0 +1,167 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package proto provides functionality for handling protocol buffer messages.
+// In particular, it provides marshaling and unmarshaling between a protobuf
+// message and the binary wire format.
+//
+// See https://developers.google.com/protocol-buffers/docs/gotutorial for
+// more information.
+//
+// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
+package proto
+
+import (
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ ProtoPackageIsVersion1 = true
+ ProtoPackageIsVersion2 = true
+ ProtoPackageIsVersion3 = true
+ ProtoPackageIsVersion4 = true
+)
+
+// GeneratedEnum is any enum type generated by protoc-gen-go
+// which is a named int32 kind.
+// This type exists for documentation purposes.
+type GeneratedEnum interface{}
+
+// GeneratedMessage is any message type generated by protoc-gen-go
+// which is a pointer to a named struct kind.
+// This type exists for documentation purposes.
+type GeneratedMessage interface{}
+
+// Message is a protocol buffer message.
+//
+// This is the v1 version of the message interface and is marginally better
+// than an empty interface as it lacks any method to programmatically interact
+// with the contents of the message.
+//
+// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
+// exposes protobuf reflection as a first-class feature of the interface.
+//
+// To convert a v1 message to a v2 message, use the MessageV2 function.
+// To convert a v2 message to a v1 message, use the MessageV1 function.
+type Message = protoiface.MessageV1
+
+// MessageV1 converts either a v1 or v2 message to a v1 message.
+// It returns nil if m is nil.
+func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
+ return protoimpl.X.ProtoMessageV1Of(m)
+}
+
+// MessageV2 converts either a v1 or v2 message to a v2 message.
+// It returns nil if m is nil.
+func MessageV2(m GeneratedMessage) protoV2.Message {
+ return protoimpl.X.ProtoMessageV2Of(m)
+}
+
+// MessageReflect returns a reflective view for a message.
+// It returns nil if m is nil.
+func MessageReflect(m Message) protoreflect.Message {
+ return protoimpl.X.MessageOf(m)
+}
+
+// Marshaler is implemented by messages that can marshal themselves.
+// This interface is used by the following functions: Size, Marshal,
+// Buffer.Marshal, and Buffer.EncodeMessage.
+//
+// Deprecated: Do not implement.
+type Marshaler interface {
+ // Marshal formats the encoded bytes of the message.
+ // It should be deterministic and emit valid protobuf wire data.
+ // The caller takes ownership of the returned buffer.
+ Marshal() ([]byte, error)
+}
+
+// Unmarshaler is implemented by messages that can unmarshal themselves.
+// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
+// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
+//
+// Deprecated: Do not implement.
+type Unmarshaler interface {
+ // Unmarshal parses the encoded bytes of the protobuf wire input.
+ // The provided buffer is only valid for during method call.
+ // It should not reset the receiver message.
+ Unmarshal([]byte) error
+}
+
+// Merger is implemented by messages that can merge themselves.
+// This interface is used by the following functions: Clone and Merge.
+//
+// Deprecated: Do not implement.
+type Merger interface {
+ // Merge merges the contents of src into the receiver message.
+ // It clones all data structures in src such that it aliases no mutable
+ // memory referenced by src.
+ Merge(src Message)
+}
+
+// RequiredNotSetError is an error type returned when
+// marshaling or unmarshaling a message with missing required fields.
+type RequiredNotSetError struct {
+ err error
+}
+
+func (e *RequiredNotSetError) Error() string {
+ if e.err != nil {
+ return e.err.Error()
+ }
+ return "proto: required field not set"
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+ return true
+}
+
+func checkRequiredNotSet(m protoV2.Message) error {
+ if err := protoV2.CheckInitialized(m); err != nil {
+ return &RequiredNotSetError{err: err}
+ }
+ return nil
+}
+
+// Clone returns a deep copy of src.
+func Clone(src Message) Message {
+ return MessageV1(protoV2.Clone(MessageV2(src)))
+}
+
+// Merge merges src into dst, which must be messages of the same type.
+//
+// Populated scalar fields in src are copied to dst, while populated
+// singular messages in src are merged into dst by recursively calling Merge.
+// The elements of every list field in src are appended to the corresponding
+// list fields in dst. The entries of every map field in src are copied into
+// the corresponding map field in dst, possibly replacing existing entries.
+// The unknown fields of src are appended to the unknown fields of dst.
+func Merge(dst, src Message) {
+ protoV2.Merge(MessageV2(dst), MessageV2(src))
+}
+
+// Equal reports whether two messages are equal.
+// If two messages marshal to the same bytes under deterministic serialization,
+// then Equal is guaranteed to report true.
+//
+// Two messages are equal if they are the same protobuf message type,
+// have the same set of populated known and extension field values,
+// and the same set of unknown fields values.
+//
+// Scalar values are compared with the equivalent of the == operator in Go,
+// except bytes values which are compared using bytes.Equal and
+// floating point values which specially treat NaNs as equal.
+// Message values are compared by recursively calling Equal.
+// Lists are equal if each element value is also equal.
+// Maps are equal if they have the same set of keys, where the pair of values
+// for each key is also equal.
+func Equal(x, y Message) bool {
+ return protoV2.Equal(MessageV2(x), MessageV2(y))
+}
+
+func isMessageSet(md protoreflect.MessageDescriptor) bool {
+ ms, ok := md.(interface{ IsMessageSet() bool })
+ return ok && ms.IsMessageSet()
+}
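(Illustrative aside, not part of the vendored patch.) Clone, Merge, and Equal above are thin shims over the v2 runtime, and generated v2 messages still satisfy the v1 Message interface, so the two APIs interoperate. A minimal sketch, assuming google.golang.org/protobuf/types/known/wrapperspb is available as a stand-in message type:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	a := wrapperspb.String("hello")

	// Clone deep-copies via the v2 runtime and returns the same concrete type.
	b := proto.Clone(a).(*wrapperspb.StringValue)
	fmt.Println(proto.Equal(a, b)) // true

	b.Value = "world"
	fmt.Println(proto.Equal(a, b)) // false

	// MessageV2 bridges the v1 view of a message to the v2 API without copying.
	fmt.Println(proto.MessageV2(a) == nil) // false
}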
diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go
new file mode 100644
index 00000000..066b4323
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/registry.go
@@ -0,0 +1,317 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io/ioutil"
+ "reflect"
+ "strings"
+ "sync"
+
+ "google.golang.org/protobuf/reflect/protodesc"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// filePath is the path to the proto source file.
+type filePath = string // e.g., "google/protobuf/descriptor.proto"
+
+// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
+type fileDescGZIP = []byte
+
+var fileCache sync.Map // map[filePath]fileDescGZIP
+
+// RegisterFile is called from generated code to register the compressed
+// FileDescriptorProto with the file path for a proto source file.
+//
+// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
+func RegisterFile(s filePath, d fileDescGZIP) {
+ // Decompress the descriptor.
+ zr, err := gzip.NewReader(bytes.NewReader(d))
+ if err != nil {
+ panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
+ }
+ b, err := ioutil.ReadAll(zr)
+ if err != nil {
+ panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
+ }
+
+ // Construct a protoreflect.FileDescriptor from the raw descriptor.
+ // Note that DescBuilder.Build automatically registers the constructed
+ // file descriptor with the v2 registry.
+ protoimpl.DescBuilder{RawDescriptor: b}.Build()
+
+ // Locally cache the raw descriptor form for the file.
+ fileCache.Store(s, d)
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto given the file path
+// for a proto source file. It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
+func FileDescriptor(s filePath) fileDescGZIP {
+ if v, ok := fileCache.Load(s); ok {
+ return v.(fileDescGZIP)
+ }
+
+ // Find the descriptor in the v2 registry.
+ var b []byte
+ if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
+ b, _ = Marshal(protodesc.ToFileDescriptorProto(fd))
+ }
+
+ // Locally cache the raw descriptor form for the file.
+ if len(b) > 0 {
+ v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
+ return v.(fileDescGZIP)
+ }
+ return nil
+}
+
+// enumName is the name of an enum. For historical reasons, the enum name is
+// neither the full Go name nor the full protobuf name of the enum.
+// The name is the dot-separated combination of just the proto package that the
+// enum is declared within followed by the Go type name of the generated enum.
+type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"
+
+// enumsByName maps enum values by name to their numeric counterpart.
+type enumsByName = map[string]int32
+
+// enumsByNumber maps enum values by number to their name counterpart.
+type enumsByNumber = map[int32]string
+
+var enumCache sync.Map // map[enumName]enumsByName
+var numFilesCache sync.Map // map[protoreflect.FullName]int
+
+// RegisterEnum is called from the generated code to register the mapping of
+// enum value names to enum numbers for the enum identified by s.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
+func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
+ if _, ok := enumCache.Load(s); ok {
+ panic("proto: duplicate enum registered: " + s)
+ }
+ enumCache.Store(s, m)
+
+ // This does not forward registration to the v2 registry since this API
+ // lacks sufficient information to construct a complete v2 enum descriptor.
+}
+
+// EnumValueMap returns the mapping from enum value names to enum numbers for
+// the enum of the given name. It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
+func EnumValueMap(s enumName) enumsByName {
+ if v, ok := enumCache.Load(s); ok {
+ return v.(enumsByName)
+ }
+
+ // Check whether the cache is stale. If the number of files in the current
+ // package differs, then it means that some enums may have been recently
+ // registered upstream that we do not know about.
+ var protoPkg protoreflect.FullName
+ if i := strings.LastIndexByte(s, '.'); i >= 0 {
+ protoPkg = protoreflect.FullName(s[:i])
+ }
+ v, _ := numFilesCache.Load(protoPkg)
+ numFiles, _ := v.(int)
+ if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
+ return nil // cache is up-to-date; was not found earlier
+ }
+
+ // Update the enum cache for all enums declared in the given proto package.
+ numFiles = 0
+ protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
+ walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
+ name := protoimpl.X.LegacyEnumName(ed)
+ if _, ok := enumCache.Load(name); !ok {
+ m := make(enumsByName)
+ evs := ed.Values()
+ for i := evs.Len() - 1; i >= 0; i-- {
+ ev := evs.Get(i)
+ m[string(ev.Name())] = int32(ev.Number())
+ }
+ enumCache.LoadOrStore(name, m)
+ }
+ })
+ numFiles++
+ return true
+ })
+ numFilesCache.Store(protoPkg, numFiles)
+
+ // Check cache again for enum map.
+ if v, ok := enumCache.Load(s); ok {
+ return v.(enumsByName)
+ }
+ return nil
+}
+
+// walkEnums recursively walks all enums declared in d.
+func walkEnums(d interface {
+ Enums() protoreflect.EnumDescriptors
+ Messages() protoreflect.MessageDescriptors
+}, f func(protoreflect.EnumDescriptor)) {
+ eds := d.Enums()
+ for i := eds.Len() - 1; i >= 0; i-- {
+ f(eds.Get(i))
+ }
+ mds := d.Messages()
+ for i := mds.Len() - 1; i >= 0; i-- {
+ walkEnums(mds.Get(i), f)
+ }
+}
+
+// messageName is the full name of protobuf message.
+type messageName = string
+
+var messageTypeCache sync.Map // map[messageName]reflect.Type
+
+// RegisterType is called from generated code to register the message Go type
+// for a message of the given name.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
+func RegisterType(m Message, s messageName) {
+ mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
+ if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
+ panic(err)
+ }
+ messageTypeCache.Store(s, reflect.TypeOf(m))
+}
+
+// RegisterMapType is called from generated code to register the Go map type
+// for a protobuf message representing a map entry.
+//
+// Deprecated: Do not use.
+func RegisterMapType(m interface{}, s messageName) {
+ t := reflect.TypeOf(m)
+ if t.Kind() != reflect.Map {
+ panic(fmt.Sprintf("invalid map kind: %v", t))
+ }
+ if _, ok := messageTypeCache.Load(s); ok {
+ panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
+ }
+ messageTypeCache.Store(s, t)
+}
+
+// MessageType returns the message type for a named message.
+// It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
+func MessageType(s messageName) reflect.Type {
+ if v, ok := messageTypeCache.Load(s); ok {
+ return v.(reflect.Type)
+ }
+
+ // Derive the message type from the v2 registry.
+ var t reflect.Type
+ if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
+ t = messageGoType(mt)
+ }
+
+ // If we could not get a concrete type, it is possible that it is a
+ // pseudo-message for a map entry.
+ if t == nil {
+ d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
+ if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
+ kt := goTypeForField(md.Fields().ByNumber(1))
+ vt := goTypeForField(md.Fields().ByNumber(2))
+ t = reflect.MapOf(kt, vt)
+ }
+ }
+
+ // Locally cache the message type for the given name.
+ if t != nil {
+ v, _ := messageTypeCache.LoadOrStore(s, t)
+ return v.(reflect.Type)
+ }
+ return nil
+}
+
+func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
+ switch k := fd.Kind(); k {
+ case protoreflect.EnumKind:
+ if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
+ return enumGoType(et)
+ }
+ return reflect.TypeOf(protoreflect.EnumNumber(0))
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
+ return messageGoType(mt)
+ }
+ return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
+ default:
+ return reflect.TypeOf(fd.Default().Interface())
+ }
+}
+
+func enumGoType(et protoreflect.EnumType) reflect.Type {
+ return reflect.TypeOf(et.New(0))
+}
+
+func messageGoType(mt protoreflect.MessageType) reflect.Type {
+ return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
+}
+
+// MessageName returns the full protobuf name for the given message type.
+//
+// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
+func MessageName(m Message) messageName {
+ if m == nil {
+ return ""
+ }
+ if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
+ return m.XXX_MessageName()
+ }
+ return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
+}
+
+// RegisterExtension is called from the generated code to register
+// the extension descriptor.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
+func RegisterExtension(d *ExtensionDesc) {
+ if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
+ panic(err)
+ }
+}
+
+type extensionsByNumber = map[int32]*ExtensionDesc
+
+var extensionCache sync.Map // map[messageName]extensionsByNumber
+
+// RegisteredExtensions returns a map of the registered extensions for the
+// provided protobuf message, indexed by the extension field number.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
+func RegisteredExtensions(m Message) extensionsByNumber {
+ // Check whether the cache is stale. If the number of extensions for
+ // the given message differs, then it means that some extensions were
+ // recently registered upstream that we do not know about.
+ s := MessageName(m)
+ v, _ := extensionCache.Load(s)
+ xs, _ := v.(extensionsByNumber)
+ if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
+ return xs // cache is up-to-date
+ }
+
+ // Cache is stale, re-compute the extensions map.
+ xs = make(extensionsByNumber)
+ protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
+ if xd, ok := xt.(*ExtensionDesc); ok {
+ xs[int32(xt.TypeDescriptor().Number())] = xd
+ } else {
+ // TODO: This implies that the protoreflect.ExtensionType is a
+ // custom type not generated by protoc-gen-go. We could try and
+ // convert the type to an ExtensionDesc.
+ }
+ return true
+ })
+ extensionCache.Store(s, xs)
+ return xs
+}
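(Illustrative aside, not part of the vendored patch.) The legacy registration helpers above now answer lookups out of the v2 global registries and only keep local caches. A minimal sketch, assuming the well-known Empty type is linked in via google.golang.org/protobuf/types/known/emptypb:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	// Full protobuf name -> registered Go type (resolved via protoregistry.GlobalTypes).
	fmt.Println(proto.MessageType("google.protobuf.Empty")) // *emptypb.Empty

	// And the reverse: Go message value -> full protobuf name.
	fmt.Println(proto.MessageName(&emptypb.Empty{})) // google.protobuf.Empty
}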
diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go
new file mode 100644
index 00000000..47eb3e44
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_decode.go
@@ -0,0 +1,801 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapTextUnmarshalV2 = false
+
+// ParseError is returned by UnmarshalText.
+type ParseError struct {
+ Message string
+
+ // Deprecated: Do not use.
+ Line, Offset int
+}
+
+func (e *ParseError) Error() string {
+ if wrapTextUnmarshalV2 {
+ return e.Message
+ }
+ if e.Line == 1 {
+ return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
+ }
+ return fmt.Sprintf("line %d: %v", e.Line, e.Message)
+}
+
+// UnmarshalText parses a proto text formatted string into m.
+func UnmarshalText(s string, m Message) error {
+ if u, ok := m.(encoding.TextUnmarshaler); ok {
+ return u.UnmarshalText([]byte(s))
+ }
+
+ m.Reset()
+ mi := MessageV2(m)
+
+ if wrapTextUnmarshalV2 {
+ err := prototext.UnmarshalOptions{
+ AllowPartial: true,
+ }.Unmarshal([]byte(s), mi)
+ if err != nil {
+ return &ParseError{Message: err.Error()}
+ }
+ return checkRequiredNotSet(mi)
+ } else {
+ if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
+ return err
+ }
+ return checkRequiredNotSet(mi)
+ }
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
+ md := m.Descriptor()
+ fds := md.Fields()
+
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]" or "[type/url]".
+ //
+ // The whole struct can also be an expanded Any message, like:
+ // [type/url] < ... struct contents ... >
+ seen := make(map[protoreflect.FieldNumber]bool)
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := protoreflect.Name(tok.value)
+ fd := fds.ByName(name)
+ switch {
+ case fd == nil:
+ gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
+ if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
+ fd = gd
+ }
+ case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
+ fd = nil
+ case fd.IsWeak() && fd.Message().IsPlaceholder():
+ fd = nil
+ }
+ if fd == nil {
+ typeName := string(md.FullName())
+ if m, ok := m.Interface().(Message); ok {
+ t := reflect.TypeOf(m)
+ if t.Kind() == reflect.Ptr {
+ typeName = t.Elem().String()
+ }
+ }
+ return p.errorf("unknown field name %q in %v", name, typeName)
+ }
+ if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
+ return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
+ }
+ if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
+ return p.errorf("non-repeated field %q was repeated", fd.Name())
+ }
+ seen[fd.Number()] = true
+
+ // Consume any colon.
+ if err := p.checkForColon(fd); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ v := m.Get(fd)
+ if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
+ v = m.Mutable(fd)
+ }
+ if v, err = p.unmarshalValue(v, fd); err != nil {
+ return err
+ }
+ m.Set(fd, v)
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
+ name, err := p.consumeExtensionOrAnyName()
+ if err != nil {
+ return err
+ }
+
+ // If it contains a slash, it's an Any type URL.
+ if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ // consume an optional colon
+ if tok.value == ":" {
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ }
+
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+
+ mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
+ if err != nil {
+ return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
+ }
+ m2 := mt.New()
+ if err := p.unmarshalMessage(m2, terminator); err != nil {
+ return err
+ }
+ b, err := protoV2.Marshal(m2.Interface())
+ if err != nil {
+ return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
+ }
+
+ urlFD := m.Descriptor().Fields().ByName("type_url")
+ valFD := m.Descriptor().Fields().ByName("value")
+ if seen[urlFD.Number()] {
+ return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
+ }
+ if seen[valFD.Number()] {
+ return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
+ }
+ m.Set(urlFD, protoreflect.ValueOfString(name))
+ m.Set(valFD, protoreflect.ValueOfBytes(b))
+ seen[urlFD.Number()] = true
+ seen[valFD.Number()] = true
+ return nil
+ }
+
+ xname := protoreflect.FullName(name)
+ xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
+ if xt == nil && isMessageSet(m.Descriptor()) {
+ xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
+ }
+ if xt == nil {
+ return p.errorf("unrecognized extension %q", name)
+ }
+ fd := xt.TypeDescriptor()
+ if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
+ return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
+ }
+
+ if err := p.checkForColon(fd); err != nil {
+ return err
+ }
+
+ v := m.Get(fd)
+ if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
+ v = m.Mutable(fd)
+ }
+ v, err = p.unmarshalValue(v, fd)
+ if err != nil {
+ return err
+ }
+ m.Set(fd, v)
+ return p.consumeOptionalSeparator()
+}
+
+func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == "" {
+ return v, p.errorf("unexpected EOF")
+ }
+
+ switch {
+ case fd.IsList():
+ lv := v.List()
+ var err error
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ vv := lv.NewElement()
+ vv, err = p.unmarshalSingularValue(vv, fd)
+ if err != nil {
+ return v, err
+ }
+ lv.Append(vv)
+
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return v, p.errorf("Expected ']' or ',' found %q", tok.value)
+ }
+ }
+ return v, nil
+ }
+
+ // One value of the repeated field.
+ p.back()
+ vv := lv.NewElement()
+ vv, err = p.unmarshalSingularValue(vv, fd)
+ if err != nil {
+ return v, err
+ }
+ lv.Append(vv)
+ return v, nil
+ case fd.IsMap():
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // However, implementations may omit key or value, and technically
+ // we should support them in any order.
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return v, p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+
+ keyFD := fd.MapKey()
+ valFD := fd.MapValue()
+
+ mv := v.Map()
+ kv := keyFD.Default()
+ vv := mv.NewValue()
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ var err error
+ switch tok.value {
+ case "key":
+ if err := p.consumeToken(":"); err != nil {
+ return v, err
+ }
+ if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
+ return v, err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return v, err
+ }
+ case "value":
+ if err := p.checkForColon(valFD); err != nil {
+ return v, err
+ }
+ if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
+ return v, err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return v, err
+ }
+ default:
+ p.back()
+ return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+ }
+ }
+ mv.Set(kv.MapKey(), vv)
+ return v, nil
+ default:
+ p.back()
+ return p.unmarshalSingularValue(v, fd)
+ }
+}
+
+func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == "" {
+ return v, p.errorf("unexpected EOF")
+ }
+
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ switch tok.value {
+ case "true", "1", "t", "True":
+ return protoreflect.ValueOfBool(true), nil
+ case "false", "0", "f", "False":
+ return protoreflect.ValueOfBool(false), nil
+ }
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfInt32(int32(x)), nil
+ }
+
+ // The C++ parser accepts large positive hex numbers that use
+ // two's complement arithmetic to represent negative numbers.
+ // This feature is here for backwards compatibility with C++.
+ if strings.HasPrefix(tok.value, "0x") {
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
+ }
+ }
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ return protoreflect.ValueOfInt64(int64(x)), nil
+ }
+
+ // The C++ parser accepts large positive hex numbers that use
+ // two's complement arithmetic to represent negative numbers.
+ // This feature is here for backwards compatibility with C++.
+ if strings.HasPrefix(tok.value, "0x") {
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
+ }
+ }
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfUint32(uint32(x)), nil
+ }
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ return protoreflect.ValueOfUint64(uint64(x)), nil
+ }
+ case protoreflect.FloatKind:
+ // Ignore 'f' for compatibility with output generated by C++,
+ // but don't remove 'f' when the value is "-inf" or "inf".
+ v := tok.value
+ if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
+ v = v[:len(v)-len("f")]
+ }
+ if x, err := strconv.ParseFloat(v, 32); err == nil {
+ return protoreflect.ValueOfFloat32(float32(x)), nil
+ }
+ case protoreflect.DoubleKind:
+ // Ignore 'f' for compatibility with output generated by C++,
+ // but don't remove 'f' when the value is "-inf" or "inf".
+ v := tok.value
+ if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
+ v = v[:len(v)-len("f")]
+ }
+ if x, err := strconv.ParseFloat(v, 64); err == nil {
+ return protoreflect.ValueOfFloat64(float64(x)), nil
+ }
+ case protoreflect.StringKind:
+ if isQuote(tok.value[0]) {
+ return protoreflect.ValueOfString(tok.unquoted), nil
+ }
+ case protoreflect.BytesKind:
+ if isQuote(tok.value[0]) {
+ return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
+ }
+ case protoreflect.EnumKind:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
+ }
+ vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
+ if vd != nil {
+ return protoreflect.ValueOfEnum(vd.Number()), nil
+ }
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return v, p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ err := p.unmarshalMessage(v.Message(), terminator)
+ return v, err
+ default:
+ panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
+ }
+ return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ if fd.Message() == nil {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
+// the following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtensionOrAnyName() (string, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+
+ // If the extension name or type URL is quoted, it's a single token.
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+ name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+ if err != nil {
+ return "", err
+ }
+ return name, p.consumeToken("]")
+ }
+
+ // Consume everything up to "]"
+ var parts []string
+ for tok.value != "]" {
+ parts = append(parts, tok.value)
+ tok = p.next()
+ if tok.err != nil {
+ return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+ }
+ if p.done && tok.value != "]" {
+ return "", p.errorf("unclosed type_url or extension name")
+ }
+ }
+ return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in unmarshalMessage to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+var errBadUTF8 = errors.New("proto: bad UTF-8")
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ ss := string(r) + s[:2]
+ s = s[2:]
+ i, err := strconv.ParseUint(ss, 8, 8)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'x', 'X', 'u', 'U':
+ var n int
+ switch r {
+ case 'x', 'X':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
+ }
+ ss := s[:n]
+ s = s[n:]
+ i, err := strconv.ParseUint(ss, 16, 64)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+ }
+ if r == 'x' || r == 'X' {
+ return string([]byte{byte(i)}), s, nil
+ }
+ if i > utf8.MaxRune {
+ return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+ }
+ return string(rune(i)), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
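(Illustrative aside, not part of the vendored patch.) UnmarshalText above drives the hand-written text parser against protobuf reflection rather than against generated struct fields. A minimal sketch, again using wrapperspb as a stand-in message type:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	msg := &wrapperspb.StringValue{}
	// Parse the legacy text format into msg; required-field checks run afterwards.
	if err := proto.UnmarshalText(`value: "hello"`, msg); err != nil {
		panic(err)
	}
	fmt.Println(msg.GetValue()) // hello
}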
diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go
new file mode 100644
index 00000000..a31134ee
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_encode.go
@@ -0,0 +1,560 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "bytes"
+ "encoding"
+ "fmt"
+ "io"
+ "math"
+ "sort"
+ "strings"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapTextMarshalV2 = false
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line)
+ ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes the proto text format of m to w.
+func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
+ b, err := tm.marshal(m)
+ if len(b) > 0 {
+ if _, err := w.Write(b); err != nil {
+ return err
+ }
+ }
+ return err
+}
+
+// Text returns a proto text formatted string of m.
+func (tm *TextMarshaler) Text(m Message) string {
+ b, _ := tm.marshal(m)
+ return string(b)
+}
+
+func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return []byte(""), nil
+ }
+
+ if wrapTextMarshalV2 {
+ if m, ok := m.(encoding.TextMarshaler); ok {
+ return m.MarshalText()
+ }
+
+ opts := prototext.MarshalOptions{
+ AllowPartial: true,
+ EmitUnknown: true,
+ }
+ if !tm.Compact {
+ opts.Indent = " "
+ }
+ if !tm.ExpandAny {
+ opts.Resolver = (*protoregistry.Types)(nil)
+ }
+ return opts.Marshal(mr.Interface())
+ } else {
+ w := &textWriter{
+ compact: tm.Compact,
+ expandAny: tm.ExpandAny,
+ complete: true,
+ }
+
+ if m, ok := m.(encoding.TextMarshaler); ok {
+ b, err := m.MarshalText()
+ if err != nil {
+ return nil, err
+ }
+ w.Write(b)
+ return w.buf, nil
+ }
+
+ err := w.writeMessage(mr)
+ return w.buf, err
+ }
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// MarshalText writes the proto text format of m to w.
+func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
+
+// MarshalTextString returns a proto text formatted string of m.
+func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
+
+// CompactText writes the compact proto text format of m to w.
+func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
+
+// CompactTextString returns a compact proto text formatted string of m.
+func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
+
+var (
+ newline = []byte("\n")
+ endBraceNewline = []byte("}\n")
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ compact bool // same as TextMarshaler.Compact
+ expandAny bool // same as TextMarshaler.ExpandAny
+ complete bool // whether the current position is a complete line
+ indent int // indentation level; never negative
+ buf []byte
+}
+
+func (w *textWriter) Write(p []byte) (n int, _ error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.buf = append(w.buf, p...)
+ w.complete = false
+ return len(p), nil
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ w.buf = append(w.buf, ' ')
+ n++
+ }
+ w.buf = append(w.buf, frag...)
+ n += len(frag)
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ w.buf = append(w.buf, frag...)
+ n += len(frag)
+ if i+1 < len(frags) {
+ w.buf = append(w.buf, '\n')
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.buf = append(w.buf, c)
+ w.complete = c == '\n'
+ return nil
+}
+
+func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+
+ if fd.Kind() != protoreflect.GroupKind {
+ w.buf = append(w.buf, fd.Name()...)
+ w.WriteByte(':')
+ } else {
+ // Use message type name for group field name.
+ w.buf = append(w.buf, fd.Message().Name()...)
+ }
+
+ if !w.compact {
+ w.WriteByte(' ')
+ }
+}
+
+func requiresQuotes(u string) bool {
+// When the type URL contains any characters other than [0-9A-Za-z._/], it must be quoted.
+ for _, ch := range u {
+ switch {
+ case ch == '.' || ch == '/' || ch == '_':
+ continue
+ case '0' <= ch && ch <= '9':
+ continue
+ case 'A' <= ch && ch <= 'Z':
+ continue
+ case 'a' <= ch && ch <= 'z':
+ continue
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if the value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when the value was written in expanded format or an error
+// was encountered.
+func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
+ md := m.Descriptor()
+ fdURL := md.Fields().ByName("type_url")
+ fdVal := md.Fields().ByName("value")
+
+ url := m.Get(fdURL).String()
+ mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
+ if err != nil {
+ return false, nil
+ }
+
+ b := m.Get(fdVal).Bytes()
+ m2 := mt.New()
+ if err := proto.Unmarshal(b, m2.Interface()); err != nil {
+ return false, nil
+ }
+ w.Write([]byte("["))
+ if requiresQuotes(url) {
+ w.writeQuotedString(url)
+ } else {
+ w.Write([]byte(url))
+ }
+ if w.compact {
+ w.Write([]byte("]:<"))
+ } else {
+ w.Write([]byte("]: <\n"))
+ w.indent++
+ }
+ if err := w.writeMessage(m2); err != nil {
+ return true, err
+ }
+ if w.compact {
+ w.Write([]byte("> "))
+ } else {
+ w.indent--
+ w.Write([]byte(">\n"))
+ }
+ return true, nil
+}
+
+func (w *textWriter) writeMessage(m protoreflect.Message) error {
+ md := m.Descriptor()
+ if w.expandAny && md.FullName() == "google.protobuf.Any" {
+ if canExpand, err := w.writeProto3Any(m); canExpand {
+ return err
+ }
+ }
+
+ fds := md.Fields()
+ for i := 0; i < fds.Len(); {
+ fd := fds.Get(i)
+ if od := fd.ContainingOneof(); od != nil {
+ fd = m.WhichOneof(od)
+ i += od.Fields().Len()
+ } else {
+ i++
+ }
+ if fd == nil || !m.Has(fd) {
+ continue
+ }
+
+ switch {
+ case fd.IsList():
+ lv := m.Get(fd).List()
+ for j := 0; j < lv.Len(); j++ {
+ w.writeName(fd)
+ v := lv.Get(j)
+ if err := w.writeSingularValue(v, fd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ }
+ case fd.IsMap():
+ kfd := fd.MapKey()
+ vfd := fd.MapValue()
+ mv := m.Get(fd).Map()
+
+ type entry struct{ key, val protoreflect.Value }
+ var entries []entry
+ mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+ entries = append(entries, entry{k.Value(), v})
+ return true
+ })
+ sort.Slice(entries, func(i, j int) bool {
+ switch kfd.Kind() {
+ case protoreflect.BoolKind:
+ return !entries[i].key.Bool() && entries[j].key.Bool()
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ return entries[i].key.Int() < entries[j].key.Int()
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ return entries[i].key.Uint() < entries[j].key.Uint()
+ case protoreflect.StringKind:
+ return entries[i].key.String() < entries[j].key.String()
+ default:
+ panic("invalid kind")
+ }
+ })
+ for _, entry := range entries {
+ w.writeName(fd)
+ w.WriteByte('<')
+ if !w.compact {
+ w.WriteByte('\n')
+ }
+ w.indent++
+ w.writeName(kfd)
+ if err := w.writeSingularValue(entry.key, kfd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ w.writeName(vfd)
+ if err := w.writeSingularValue(entry.val, vfd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ w.indent--
+ w.WriteByte('>')
+ w.WriteByte('\n')
+ }
+ default:
+ w.writeName(fd)
+ if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ }
+ }
+
+ if b := m.GetUnknown(); len(b) > 0 {
+ w.writeUnknownFields(b)
+ }
+ return w.writeExtensions(m)
+}
+
+func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+ switch fd.Kind() {
+ case protoreflect.FloatKind, protoreflect.DoubleKind:
+ switch vf := v.Float(); {
+ case math.IsInf(vf, +1):
+ w.Write(posInf)
+ case math.IsInf(vf, -1):
+ w.Write(negInf)
+ case math.IsNaN(vf):
+ w.Write(nan)
+ default:
+ fmt.Fprint(w, v.Interface())
+ }
+ case protoreflect.StringKind:
+ // NOTE: This does not validate UTF-8 for historical reasons.
+ w.writeQuotedString(string(v.String()))
+ case protoreflect.BytesKind:
+ w.writeQuotedString(string(v.Bytes()))
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ var bra, ket byte = '<', '>'
+ if fd.Kind() == protoreflect.GroupKind {
+ bra, ket = '{', '}'
+ }
+ w.WriteByte(bra)
+ if !w.compact {
+ w.WriteByte('\n')
+ }
+ w.indent++
+ m := v.Message()
+ if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
+ b, err := m2.MarshalText()
+ if err != nil {
+ return err
+ }
+ w.Write(b)
+ } else {
+ w.writeMessage(m)
+ }
+ w.indent--
+ w.WriteByte(ket)
+ case protoreflect.EnumKind:
+ if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
+ fmt.Fprint(w, ev.Name())
+ } else {
+ fmt.Fprint(w, v.Enum())
+ }
+ default:
+ fmt.Fprint(w, v.Interface())
+ }
+ return nil
+}
+
+// writeQuotedString writes a quoted string in the protocol buffer text format.
+func (w *textWriter) writeQuotedString(s string) {
+ w.WriteByte('"')
+ for i := 0; i < len(s); i++ {
+ switch c := s[i]; c {
+ case '\n':
+ w.buf = append(w.buf, `\n`...)
+ case '\r':
+ w.buf = append(w.buf, `\r`...)
+ case '\t':
+ w.buf = append(w.buf, `\t`...)
+ case '"':
+ w.buf = append(w.buf, `\"`...)
+ case '\\':
+ w.buf = append(w.buf, `\\`...)
+ default:
+ if isPrint := c >= 0x20 && c < 0x7f; isPrint {
+ w.buf = append(w.buf, c)
+ } else {
+ w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
+ }
+ }
+ }
+ w.WriteByte('"')
+}
+
+func (w *textWriter) writeUnknownFields(b []byte) {
+ if !w.compact {
+ fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
+ }
+
+ for len(b) > 0 {
+ num, wtyp, n := protowire.ConsumeTag(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+
+ if wtyp == protowire.EndGroupType {
+ w.indent--
+ w.Write(endBraceNewline)
+ continue
+ }
+ fmt.Fprint(w, num)
+ if wtyp != protowire.StartGroupType {
+ w.WriteByte(':')
+ }
+ if !w.compact || wtyp == protowire.StartGroupType {
+ w.WriteByte(' ')
+ }
+ switch wtyp {
+ case protowire.VarintType:
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprint(w, v)
+ case protowire.Fixed32Type:
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprint(w, v)
+ case protowire.Fixed64Type:
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprint(w, v)
+ case protowire.BytesType:
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprintf(w, "%q", v)
+ case protowire.StartGroupType:
+ w.WriteByte('{')
+ w.indent++
+ default:
+ fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
+ }
+ w.WriteByte('\n')
+ }
+}
+
+// writeExtensions writes all the extensions in m.
+func (w *textWriter) writeExtensions(m protoreflect.Message) error {
+ md := m.Descriptor()
+ if md.ExtensionRanges().Len() == 0 {
+ return nil
+ }
+
+ type ext struct {
+ desc protoreflect.FieldDescriptor
+ val protoreflect.Value
+ }
+ var exts []ext
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ if fd.IsExtension() {
+ exts = append(exts, ext{fd, v})
+ }
+ return true
+ })
+ sort.Slice(exts, func(i, j int) bool {
+ return exts[i].desc.Number() < exts[j].desc.Number()
+ })
+
+ for _, ext := range exts {
+ // For message set, use the name of the message as the extension name.
+ name := string(ext.desc.FullName())
+ if isMessageSet(ext.desc.ContainingMessage()) {
+ name = strings.TrimSuffix(name, ".message_set_extension")
+ }
+
+ if !ext.desc.IsList() {
+ if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
+ return err
+ }
+ } else {
+ lv := ext.val.List()
+ for i := 0; i < lv.Len(); i++ {
+ if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+ fmt.Fprintf(w, "[%s]:", name)
+ if !w.compact {
+ w.WriteByte(' ')
+ }
+ if err := w.writeSingularValue(v, fd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ for i := 0; i < w.indent*2; i++ {
+ w.buf = append(w.buf, ' ')
+ }
+ w.complete = false
+}
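(Illustrative aside, not part of the vendored patch.) The exported entry points above wrap the textWriter: MarshalTextString produces the indented multi-line form and CompactTextString the one-line form. A minimal sketch using wrapperspb as a stand-in:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	msg := wrapperspb.String("hello")

	fmt.Print(proto.MarshalTextString(msg))   // value: "hello"
	fmt.Println(proto.CompactTextString(msg)) // value:"hello"
}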
diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go
new file mode 100644
index 00000000..d7c28da5
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/wire.go
@@ -0,0 +1,78 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// Size returns the size in bytes of the wire-format encoding of m.
+func Size(m Message) int {
+ if m == nil {
+ return 0
+ }
+ mi := MessageV2(m)
+ return protoV2.Size(mi)
+}
+
+// Marshal returns the wire-format encoding of m.
+func Marshal(m Message) ([]byte, error) {
+ b, err := marshalAppend(nil, m, false)
+ if b == nil {
+ b = zeroBytes
+ }
+ return b, err
+}
+
+var zeroBytes = make([]byte, 0, 0)
+
+func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
+ if m == nil {
+ return nil, ErrNil
+ }
+ mi := MessageV2(m)
+ nbuf, err := protoV2.MarshalOptions{
+ Deterministic: deterministic,
+ AllowPartial: true,
+ }.MarshalAppend(buf, mi)
+ if err != nil {
+ return buf, err
+ }
+ if len(buf) == len(nbuf) {
+ if !mi.ProtoReflect().IsValid() {
+ return buf, ErrNil
+ }
+ }
+ return nbuf, checkRequiredNotSet(mi)
+}
+
+// Unmarshal parses a wire-format message in b and places the decoded results in m.
+//
+// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
+// removed. Use UnmarshalMerge to preserve and append to existing data.
+func Unmarshal(b []byte, m Message) error {
+ m.Reset()
+ return UnmarshalMerge(b, m)
+}
+
+// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
+func UnmarshalMerge(b []byte, m Message) error {
+ mi := MessageV2(m)
+ out, err := protoV2.UnmarshalOptions{
+ AllowPartial: true,
+ Merge: true,
+ }.UnmarshalState(protoiface.UnmarshalInput{
+ Buf: b,
+ Message: mi.ProtoReflect(),
+ })
+ if err != nil {
+ return err
+ }
+ if out.Flags&protoiface.UnmarshalInitialized > 0 {
+ return nil
+ }
+ return checkRequiredNotSet(mi)
+}
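
For reference, a minimal sketch of this legacy `proto` shim in use. It round-trips the `empty.Empty` alias vendored in this same update, and error handling is kept deliberately short; this is an illustration only, not code from this change:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/empty"
)

func main() {
	msg := &empty.Empty{} // alias for emptypb.Empty, vendored in this update

	// Marshal delegates to protobuf v2 MarshalOptions.MarshalAppend.
	b, err := proto.Marshal(msg)
	if err != nil {
		fmt.Println("marshal:", err)
		return
	}
	fmt.Printf("encoded %d bytes (Empty has no fields)\n", len(b))

	// Unmarshal resets the target and then merges, as documented above.
	var out empty.Empty
	if err := proto.Unmarshal(b, &out); err != nil {
		fmt.Println("unmarshal:", err)
	}
}
```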
diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go
new file mode 100644
index 00000000..398e3485
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/wrappers.go
@@ -0,0 +1,34 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+// Bool stores v in a new bool value and returns a pointer to it.
+func Bool(v bool) *bool { return &v }
+
+// Int stores v in a new int32 value and returns a pointer to it.
+//
+// Deprecated: Use Int32 instead.
+func Int(v int) *int32 { return Int32(int32(v)) }
+
+// Int32 stores v in a new int32 value and returns a pointer to it.
+func Int32(v int32) *int32 { return &v }
+
+// Int64 stores v in a new int64 value and returns a pointer to it.
+func Int64(v int64) *int64 { return &v }
+
+// Uint32 stores v in a new uint32 value and returns a pointer to it.
+func Uint32(v uint32) *uint32 { return &v }
+
+// Uint64 stores v in a new uint64 value and returns a pointer to it.
+func Uint64(v uint64) *uint64 { return &v }
+
+// Float32 stores v in a new float32 value and returns a pointer to it.
+func Float32(v float32) *float32 { return &v }
+
+// Float64 stores v in a new float64 value and returns a pointer to it.
+func Float64(v float64) *float64 { return &v }
+
+// String stores v in a new string value and returns a pointer to it.
+func String(v string) *string { return &v }
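
A short illustration of what these helpers are for: optional scalar fields in generated structs are pointers, and the helpers keep literals compact. The values below are arbitrary placeholders, not taken from any message in this repository:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Instead of declaring temporaries just to take their addresses:
	name := proto.String("example")
	retries := proto.Int32(3)
	enabled := proto.Bool(true)

	fmt.Println(*name, *retries, *enabled)
}
```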
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
new file mode 100644
index 00000000..16686a65
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
@@ -0,0 +1,62 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/ptypes/empty/empty.proto
+
+package empty
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/empty.proto.
+
+type Empty = emptypb.Empty
+
+var File_github_com_golang_protobuf_ptypes_empty_empty_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = []byte{
+ 0x0a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x3b, 0x65, 0x6d,
+ 0x70, 0x74, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() }
+func file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() {
+ if File_github_com_golang_protobuf_ptypes_empty_empty_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes,
+ DependencyIndexes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs,
+ }.Build()
+ File_github_com_golang_protobuf_ptypes_empty_empty_proto = out.File
+ file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = nil
+ file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = nil
+ file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml
deleted file mode 100644
index d8156a60..00000000
--- a/vendor/github.com/google/uuid/.travis.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-language: go
-
-go:
- - 1.4.3
- - 1.5.3
- - tip
-
-script:
- - go test -v ./...
diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md
new file mode 100644
index 00000000..7ec5ac7e
--- /dev/null
+++ b/vendor/github.com/google/uuid/CHANGELOG.md
@@ -0,0 +1,41 @@
+# Changelog
+
+## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16)
+
+
+### Features
+
+* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3))
+
+
+### Bug Fixes
+
+* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06))
+* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6))
+
+## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
+
+
+### Features
+
+* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29))
+
+## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
+
+
+### Features
+
+* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4))
+
+### Fixes
+
+* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior)
+
+## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)
+
+
+### Bug Fixes
+
+* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0))
+
+## Changelog
diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md
index 04fdf09f..a502fdc5 100644
--- a/vendor/github.com/google/uuid/CONTRIBUTING.md
+++ b/vendor/github.com/google/uuid/CONTRIBUTING.md
@@ -2,6 +2,22 @@
We definitely welcome patches and contribution to this project!
+### Tips
+
+Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org).
+
+Always try to include a test case! If it is not possible or not necessary,
+please explain why in the pull request description.
+
+### Releasing
+
+Commits that would precipitate a SemVer change, as described in the Conventional
+Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
+to create a release candidate pull request. Once submitted, `release-please`
+will create a release.
+
+For tips on how to work with `release-please`, see its documentation.
+
### Legal requirements
In order to protect both you and ourselves, you will need to sign the
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md
index f765a46f..3e9a6188 100644
--- a/vendor/github.com/google/uuid/README.md
+++ b/vendor/github.com/google/uuid/README.md
@@ -1,6 +1,6 @@
-# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master)
+# uuid
The uuid package generates and inspects UUIDs based on
-[RFC 4122](http://tools.ietf.org/html/rfc4122)
+[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122)
and DCE 1.1: Authentication and Security Services.
This package is based on the github.com/pborman/uuid package (previously named
@@ -9,10 +9,12 @@ a UUID is a 16 byte array rather than a byte slice. One loss due to this
change is the ability to represent an invalid UUID (vs a NIL UUID).
###### Install
-`go get github.com/google/uuid`
+```sh
+go get github.com/google/uuid
+```
###### Documentation
-[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid)
+[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid)
Full `go doc` style documentation for the package can be viewed online without
installing this package by using the GoDoc site here:
diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go
index b404f4be..dc60082d 100644
--- a/vendor/github.com/google/uuid/hash.go
+++ b/vendor/github.com/google/uuid/hash.go
@@ -17,6 +17,12 @@ var (
NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
Nil UUID // empty UUID, all zeros
+
+ // The Max UUID is a special form of UUID that is specified to have all 128 bits set to 1.
+ Max = UUID{
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ }
)
// NewHash returns a new UUID derived from the hash of space concatenated with
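
As a quick illustration (not part of the vendored change), the new `Max` constant stringifies to the all-ones form:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	fmt.Println(uuid.Max)             // ffffffff-ffff-ffff-ffff-ffffffffffff
	fmt.Println(uuid.Max == uuid.Nil) // false
}
```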
diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go
index 24b78edc..b2a0bc87 100644
--- a/vendor/github.com/google/uuid/node_js.go
+++ b/vendor/github.com/google/uuid/node_js.go
@@ -7,6 +7,6 @@
package uuid
// getHardwareInterface returns nil values for the JS version of the code.
-// This remvoves the "net" dependency, because it is not used in the browser.
+// This removes the "net" dependency, because it is not used in the browser.
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
func getHardwareInterface(name string) (string, []byte) { return "", nil }
diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go
index e6ef06cd..c3511292 100644
--- a/vendor/github.com/google/uuid/time.go
+++ b/vendor/github.com/google/uuid/time.go
@@ -108,12 +108,23 @@ func setClockSequence(seq int) {
}
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
-// uuid. The time is only defined for version 1 and 2 UUIDs.
+// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs.
func (uuid UUID) Time() Time {
- time := int64(binary.BigEndian.Uint32(uuid[0:4]))
- time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
- time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
- return Time(time)
+ var t Time
+ switch uuid.Version() {
+ case 6:
+ time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110
+ t = Time(time)
+ case 7:
+ time := binary.BigEndian.Uint64(uuid[:8])
+ t = Time((time>>16)*10000 + g1582ns100)
+ default: // forward compatible
+ time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+ time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+ time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+ t = Time(time)
+ }
+ return t
}
// ClockSequence returns the clock sequence encoded in uuid.
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go
index a57207ae..5232b486 100644
--- a/vendor/github.com/google/uuid/uuid.go
+++ b/vendor/github.com/google/uuid/uuid.go
@@ -56,11 +56,15 @@ func IsInvalidLengthError(err error) bool {
return ok
}
-// Parse decodes s into a UUID or returns an error. Both the standard UUID
-// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
-// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
-// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
-// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
+// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both
+// the standard UUID forms defined in RFC 4122
+// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition,
+// Parse accepts non-standard strings such as the raw hex encoding
+// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings,
+// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are
+// examined in the latter case. Parse should not be used to validate strings as
+// it parses non-standard encodings as indicated above.
func Parse(s string) (UUID, error) {
var uuid UUID
switch len(s) {
@@ -69,7 +73,7 @@ func Parse(s string) (UUID, error) {
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9:
- if strings.ToLower(s[:9]) != "urn:uuid:" {
+ if !strings.EqualFold(s[:9], "urn:uuid:") {
return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
}
s = s[9:]
@@ -101,7 +105,8 @@ func Parse(s string) (UUID, error) {
9, 11,
14, 16,
19, 21,
- 24, 26, 28, 30, 32, 34} {
+ 24, 26, 28, 30, 32, 34,
+ } {
v, ok := xtob(s[x], s[x+1])
if !ok {
return uuid, errors.New("invalid UUID format")
@@ -117,7 +122,7 @@ func ParseBytes(b []byte) (UUID, error) {
switch len(b) {
case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
+ if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) {
return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
}
b = b[9:]
@@ -145,7 +150,8 @@ func ParseBytes(b []byte) (UUID, error) {
9, 11,
14, 16,
19, 21,
- 24, 26, 28, 30, 32, 34} {
+ 24, 26, 28, 30, 32, 34,
+ } {
v, ok := xtob(b[x], b[x+1])
if !ok {
return uuid, errors.New("invalid UUID format")
@@ -180,6 +186,59 @@ func Must(uuid UUID, err error) UUID {
return uuid
}
+// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+// It returns an error if the format is invalid, otherwise nil.
+func Validate(s string) error {
+ switch len(s) {
+ // Standard UUID format
+ case 36:
+
+ // UUID with "urn:uuid:" prefix
+ case 36 + 9:
+ if !strings.EqualFold(s[:9], "urn:uuid:") {
+ return fmt.Errorf("invalid urn prefix: %q", s[:9])
+ }
+ s = s[9:]
+
+ // UUID enclosed in braces
+ case 36 + 2:
+ if s[0] != '{' || s[len(s)-1] != '}' {
+ return fmt.Errorf("invalid bracketed UUID format")
+ }
+ s = s[1 : len(s)-1]
+
+ // UUID without hyphens
+ case 32:
+ for i := 0; i < len(s); i += 2 {
+ _, ok := xtob(s[i], s[i+1])
+ if !ok {
+ return errors.New("invalid UUID format")
+ }
+ }
+
+ default:
+ return invalidLengthError{len(s)}
+ }
+
+ // Check for standard UUID format
+ if len(s) == 36 {
+ if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+ return errors.New("invalid UUID format")
+ }
+ for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
+ if _, ok := xtob(s[x], s[x+1]); !ok {
+ return errors.New("invalid UUID format")
+ }
+ }
+ }
+
+ return nil
+}
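
An illustrative sketch of the new `Validate` helper (the sample inputs are arbitrary placeholders): it reports format errors without allocating a `UUID`, unlike `Parse`.

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	inputs := []string{
		"f47ac10b-58cc-0372-8567-0e02b2c3d479",          // standard form
		"urn:uuid:f47ac10b-58cc-0372-8567-0e02b2c3d479", // URN form
		"{f47ac10b-58cc-0372-8567-0e02b2c3d479}",        // braced form
		"f47ac10b58cc037285670e02b2c3d479",              // raw hex
		"not-a-uuid",
	}
	for _, s := range inputs {
		if err := uuid.Validate(s); err != nil {
			fmt.Printf("%q: %v\n", s, err)
			continue
		}
		fmt.Printf("%q: ok\n", s)
	}
}
```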
+
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// , or "" if uuid is invalid.
func (uuid UUID) String() string {
@@ -292,3 +351,15 @@ func DisableRandPool() {
poolMu.Lock()
poolPos = randPoolSize
}
+
+// UUIDs is a slice of UUID types.
+type UUIDs []UUID
+
+// Strings returns a string slice containing the string form of each UUID in uuids.
+func (uuids UUIDs) Strings() []string {
+ var uuidStrs = make([]string, len(uuids))
+ for i, uuid := range uuids {
+ uuidStrs[i] = uuid.String()
+ }
+ return uuidStrs
+}
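
And a small sketch of the new `UUIDs` slice type with its `Strings()` convenience method (illustrative only):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/google/uuid"
)

func main() {
	ids := uuid.UUIDs{uuid.New(), uuid.New(), uuid.New()}

	// Strings converts every element in one call, handy for logs or queries.
	fmt.Println(strings.Join(ids.Strings(), ", "))
}
```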
diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go
new file mode 100644
index 00000000..339a959a
--- /dev/null
+++ b/vendor/github.com/google/uuid/version6.go
@@ -0,0 +1,56 @@
+// Copyright 2023 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "encoding/binary"
+
+// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
+// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
+// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
+//
+// NewV6 returns a Version 6 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set, NewV6 falls back to a random NodeID. If the clock sequence has not
+// been set by SetClockSequence then it will be set automatically. If GetTime
+// fails to return the current time, NewV6 returns Nil and an error.
+func NewV6() (UUID, error) {
+ var uuid UUID
+ now, seq, err := GetTime()
+ if err != nil {
+ return uuid, err
+ }
+
+ /*
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | time_high |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | time_mid | time_low_and_version |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |clk_seq_hi_res | clk_seq_low | node (0-1) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | node (2-5) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+ binary.BigEndian.PutUint64(uuid[0:], uint64(now))
+ binary.BigEndian.PutUint16(uuid[8:], seq)
+
+ uuid[6] = 0x60 | (uuid[6] & 0x0F)
+ uuid[8] = 0x80 | (uuid[8] & 0x3F)
+
+ nodeMu.Lock()
+ if nodeID == zeroID {
+ setNodeInterface("")
+ }
+ copy(uuid[10:], nodeID[:])
+ nodeMu.Unlock()
+
+ return uuid, nil
+}
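
A minimal usage sketch for the new constructor; error handling is abbreviated and the printed values depend on the local clock and NodeID:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	id, err := uuid.NewV6()
	if err != nil {
		fmt.Println("NewV6:", err)
		return
	}
	fmt.Println(id, "version:", id.Version())

	// Version 6 keeps the Gregorian timestamp, so Time() remains meaningful.
	sec, nsec := id.Time().UnixTime()
	fmt.Println("embedded time:", sec, nsec)
}
```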
diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go
new file mode 100644
index 00000000..3167b643
--- /dev/null
+++ b/vendor/github.com/google/uuid/version7.go
@@ -0,0 +1,104 @@
+// Copyright 2023 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "io"
+)
+
+// UUID version 7 features a time-ordered value field derived from the widely
+// implemented and well-known Unix Epoch timestamp source: the number of
+// milliseconds since midnight 1 Jan 1970 UTC, leap seconds excluded. It also
+// has improved entropy characteristics over versions 1 and 6.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
+//
+// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible.
+//
+// NewV7 returns a Version 7 UUID based on the current time (Unix Epoch).
+// It uses the randomness pool if it was enabled with EnableRandPool.
+// On error, NewV7 returns Nil and an error.
+func NewV7() (UUID, error) {
+ uuid, err := NewRandom()
+ if err != nil {
+ return uuid, err
+ }
+ makeV7(uuid[:])
+ return uuid, nil
+}
+
+// NewV7FromReader returns a Version 7 UUID based on the current time (Unix Epoch).
+// It uses NewRandomFromReader to fill the random bits.
+// On error, NewV7FromReader returns Nil and an error.
+func NewV7FromReader(r io.Reader) (UUID, error) {
+ uuid, err := NewRandomFromReader(r)
+ if err != nil {
+ return uuid, err
+ }
+
+ makeV7(uuid[:])
+ return uuid, nil
+}
+
+// makeV7 fills in the 48-bit time (uuid[0] - uuid[5]) and sets the version to
+// b0111 (uuid[6]). uuid[8] already has the right variant bits (10).
+// See NewV7 and NewV7FromReader.
+func makeV7(uuid []byte) {
+ /*
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | unix_ts_ms |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | unix_ts_ms | ver | rand_a (12 bit seq) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |var| rand_b |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | rand_b |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ _ = uuid[15] // bounds check
+
+ t, s := getV7Time()
+
+ uuid[0] = byte(t >> 40)
+ uuid[1] = byte(t >> 32)
+ uuid[2] = byte(t >> 24)
+ uuid[3] = byte(t >> 16)
+ uuid[4] = byte(t >> 8)
+ uuid[5] = byte(t)
+
+ uuid[6] = 0x70 | (0x0F & byte(s>>8))
+ uuid[7] = byte(s)
+}
+
+// lastV7time is the last time value we returned, stored as:
+//
+// 52 bits of time in milliseconds since epoch
+// 12 bits of (fractional nanoseconds) >> 8
+var lastV7time int64
+
+const nanoPerMilli = 1000000
+
+// getV7Time returns the time in milliseconds and nanoseconds / 256.
+// The returned (milli << 12 + seq) is guaranteed to be greater than
+// (milli << 12 + seq) returned by any previous call to getV7Time.
+func getV7Time() (milli, seq int64) {
+ timeMu.Lock()
+ defer timeMu.Unlock()
+
+ nano := timeNow().UnixNano()
+ milli = nano / nanoPerMilli
+ // Sequence number is between 0 and 3906 (nanoPerMilli>>8)
+ seq = (nano - milli*nanoPerMilli) >> 8
+ now := milli<<12 + seq
+ if now <= lastV7time {
+ now = lastV7time + 1
+ milli = now >> 12
+ seq = now & 0xfff
+ }
+ lastV7time = now
+ return milli, seq
+}
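
A small sketch of the guarantee this gives callers: v7 UUIDs minted by one process sort in creation order, because `getV7Time` never hands out the same `(milli, seq)` pair twice (errors are ignored here for brevity):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	a, _ := uuid.NewV7()
	b, _ := uuid.NewV7()

	// The timestamp and 12-bit sequence occupy the leading bytes, so the
	// lexicographic order of the strings matches creation order.
	fmt.Println(a)
	fmt.Println(b)
	fmt.Println("ordered:", a.String() < b.String())
}
```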
diff --git a/vendor/github.com/gorilla/websocket/.editorconfig b/vendor/github.com/gorilla/websocket/.editorconfig
new file mode 100644
index 00000000..2940ec92
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/.editorconfig
@@ -0,0 +1,20 @@
+; https://editorconfig.org/
+
+root = true
+
+[*]
+insert_final_newline = true
+charset = utf-8
+trim_trailing_whitespace = true
+indent_style = space
+indent_size = 2
+
+[{Makefile,go.mod,go.sum,*.go,.gitmodules}]
+indent_style = tab
+indent_size = 4
+
+[*.md]
+indent_size = 4
+trim_trailing_whitespace = false
+
+eclint_indent_style = unset
diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore
index cd3fcd1e..84039fec 100644
--- a/vendor/github.com/gorilla/websocket/.gitignore
+++ b/vendor/github.com/gorilla/websocket/.gitignore
@@ -1,25 +1 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-
-.idea/
-*.iml
+coverage.coverprofile
diff --git a/vendor/github.com/gorilla/websocket/.golangci.yml b/vendor/github.com/gorilla/websocket/.golangci.yml
new file mode 100644
index 00000000..34882139
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/.golangci.yml
@@ -0,0 +1,3 @@
+run:
+ skip-dirs:
+ - examples/*.go
diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS
deleted file mode 100644
index 1931f400..00000000
--- a/vendor/github.com/gorilla/websocket/AUTHORS
+++ /dev/null
@@ -1,9 +0,0 @@
-# This is the official list of Gorilla WebSocket authors for copyright
-# purposes.
-#
-# Please keep the list sorted.
-
-Gary Burd
-Google LLC (https://opensource.google.com/)
-Joachim Bauch
-
diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE
index 9171c972..bb9d80bc 100644
--- a/vendor/github.com/gorilla/websocket/LICENSE
+++ b/vendor/github.com/gorilla/websocket/LICENSE
@@ -1,22 +1,27 @@
-Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.
+Copyright (c) 2023 The Gorilla Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
+modification, are permitted provided that the following conditions are
+met:
- Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
- Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/websocket/Makefile b/vendor/github.com/gorilla/websocket/Makefile
new file mode 100644
index 00000000..603a63f5
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/Makefile
@@ -0,0 +1,34 @@
+GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '')
+GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest
+
+GO_SEC=$(shell which gosec 2> /dev/null || echo '')
+GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest
+
+GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '')
+GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest
+
+.PHONY: golangci-lint
+golangci-lint:
+ $(if $(GO_LINT), ,go install $(GO_LINT_URI))
+ @echo "##### Running golangci-lint"
+ golangci-lint run -v
+
+.PHONY: gosec
+gosec:
+ $(if $(GO_SEC), ,go install $(GO_SEC_URI))
+ @echo "##### Running gosec"
+ gosec -exclude-dir examples ./...
+
+.PHONY: govulncheck
+govulncheck:
+ $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI))
+ @echo "##### Running govulncheck"
+ govulncheck ./...
+
+.PHONY: verify
+verify: golangci-lint gosec govulncheck
+
+.PHONY: test
+test:
+ @echo "##### Running tests"
+ go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./...
diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md
index 2517a287..1fd5e9c4 100644
--- a/vendor/github.com/gorilla/websocket/README.md
+++ b/vendor/github.com/gorilla/websocket/README.md
@@ -1,17 +1,14 @@
-# Gorilla WebSocket
+# gorilla/websocket
-[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket)
-[![CircleCI](https://circleci.com/gh/gorilla/websocket.svg?style=svg)](https://circleci.com/gh/gorilla/websocket)
+![testing](https://github.com/gorilla/websocket/actions/workflows/test.yml/badge.svg)
+[![codecov](https://codecov.io/github/gorilla/websocket/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/websocket)
+[![godoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket)
+[![sourcegraph](https://sourcegraph.com/github.com/gorilla/websocket/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/websocket?badge)
-Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
-[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
+Gorilla WebSocket is a [Go](http://golang.org/) implementation of the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
+![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5)
----
-
-⚠️ **[The Gorilla WebSocket Package is looking for a new maintainer](https://github.com/gorilla/websocket/issues/370)**
-
----
### Documentation
@@ -20,6 +17,7 @@ Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command)
* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo)
* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)
+* [Write buffer pool example](https://github.com/gorilla/websocket/tree/master/examples/bufferpool)
### Status
@@ -36,4 +34,3 @@ package API is stable.
The Gorilla WebSocket package passes the server tests in the [Autobahn Test
Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn
subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
-
diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go
index 2efd8355..815b0ca5 100644
--- a/vendor/github.com/gorilla/websocket/client.go
+++ b/vendor/github.com/gorilla/websocket/client.go
@@ -9,14 +9,18 @@ import (
"context"
"crypto/tls"
"errors"
+ "fmt"
"io"
- "io/ioutil"
+ "log"
+
"net"
"net/http"
"net/http/httptrace"
"net/url"
"strings"
"time"
+
+ "golang.org/x/net/proxy"
)
// ErrBadHandshake is returned when the server response to opening handshake is
@@ -224,6 +228,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h
k == "Connection" ||
k == "Sec-Websocket-Key" ||
k == "Sec-Websocket-Version" ||
+ //#nosec G101 (CWE-798): Potential HTTP request smuggling via parameter pollution
k == "Sec-Websocket-Extensions" ||
(k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
@@ -289,7 +294,9 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h
}
err = c.SetDeadline(deadline)
if err != nil {
- c.Close()
+ if err := c.Close(); err != nil {
+ log.Printf("websocket: failed to close network connection: %v", err)
+ }
return nil, err
}
return c, nil
@@ -303,7 +310,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h
return nil, nil, err
}
if proxyURL != nil {
- dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))
+ dialer, err := proxy.FromURL(proxyURL, netDialerFunc(netDial))
if err != nil {
return nil, nil, err
}
@@ -318,18 +325,20 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h
}
netConn, err := netDial("tcp", hostPort)
+ if err != nil {
+ return nil, nil, err
+ }
if trace != nil && trace.GotConn != nil {
trace.GotConn(httptrace.GotConnInfo{
Conn: netConn,
})
}
- if err != nil {
- return nil, nil, err
- }
defer func() {
if netConn != nil {
- netConn.Close()
+ if err := netConn.Close(); err != nil {
+ log.Printf("websocket: failed to close network connection: %v", err)
+ }
}
}()
@@ -370,6 +379,17 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h
resp, err := http.ReadResponse(conn.br, req)
if err != nil {
+ if d.TLSClientConfig != nil {
+ for _, proto := range d.TLSClientConfig.NextProtos {
+ if proto != "http/1.1" {
+ return nil, nil, fmt.Errorf(
+ "websocket: protocol %q was given but is not supported; "+
+ "sharing tls.Config with net/http Transport can cause this error: %w",
+ proto, err,
+ )
+ }
+ }
+ }
return nil, nil, err
}
@@ -388,7 +408,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h
// debugging.
buf := make([]byte, 1024)
n, _ := io.ReadFull(resp.Body, buf)
- resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
+ resp.Body = io.NopCloser(bytes.NewReader(buf[:n]))
return nil, resp, ErrBadHandshake
}
@@ -406,17 +426,19 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h
break
}
- resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
+ resp.Body = io.NopCloser(bytes.NewReader([]byte{}))
conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
- netConn.SetDeadline(time.Time{})
+ if err := netConn.SetDeadline(time.Time{}); err != nil {
+ return nil, nil, err
+ }
netConn = nil // to avoid close in defer.
return conn, resp, nil
}
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil {
- return &tls.Config{}
+ return &tls.Config{MinVersion: tls.VersionTLS12}
}
return cfg.Clone()
}
diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go
index 813ffb1e..9fed0ef5 100644
--- a/vendor/github.com/gorilla/websocket/compression.go
+++ b/vendor/github.com/gorilla/websocket/compression.go
@@ -8,6 +8,7 @@ import (
"compress/flate"
"errors"
"io"
+ "log"
"strings"
"sync"
)
@@ -33,7 +34,9 @@ func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
"\x01\x00\x00\xff\xff"
fr, _ := flateReaderPool.Get().(io.ReadCloser)
- fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil)
+ if err := fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil); err != nil {
+ panic(err)
+ }
return &flateReadWrapper{fr}
}
@@ -132,7 +135,9 @@ func (r *flateReadWrapper) Read(p []byte) (int, error) {
// Preemptively place the reader back in the pool. This helps with
// scenarios where the application does not call NextReader() soon after
// this final read.
- r.Close()
+ if err := r.Close(); err != nil {
+ log.Printf("websocket: flateReadWrapper.Close() returned error: %v", err)
+ }
}
return n, err
}
diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go
index 331eebc8..221e6cf7 100644
--- a/vendor/github.com/gorilla/websocket/conn.go
+++ b/vendor/github.com/gorilla/websocket/conn.go
@@ -6,11 +6,11 @@ package websocket
import (
"bufio"
+ "crypto/rand"
"encoding/binary"
"errors"
"io"
- "io/ioutil"
- "math/rand"
+ "log"
"net"
"strconv"
"strings"
@@ -181,13 +181,20 @@ var (
errInvalidControlFrame = errors.New("websocket: invalid control frame")
)
+// maskRand is an io.Reader for generating mask bytes. The reader is initialized
+// to crypto/rand Reader. Tests swap the reader to a math/rand reader for
+// reproducible results.
+var maskRand = rand.Reader
+
+// newMaskKey returns a new 32 bit value for masking client frames.
func newMaskKey() [4]byte {
- n := rand.Uint32()
- return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}
+ var k [4]byte
+ _, _ = io.ReadFull(maskRand, k[:])
+ return k
}
func hideTempErr(err error) error {
- if e, ok := err.(net.Error); ok && e.Temporary() {
+ if e, ok := err.(net.Error); ok {
err = &netError{msg: e.Error(), timeout: e.Timeout()}
}
return err
@@ -372,7 +379,9 @@ func (c *Conn) read(n int) ([]byte, error) {
if err == io.EOF {
err = errUnexpectedEOF
}
- c.br.Discard(len(p))
+ if _, err := c.br.Discard(len(p)); err != nil {
+ return p, err
+ }
return p, err
}
@@ -387,7 +396,9 @@ func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error
return err
}
- c.conn.SetWriteDeadline(deadline)
+ if err := c.conn.SetWriteDeadline(deadline); err != nil {
+ return c.writeFatal(err)
+ }
if len(buf1) == 0 {
_, err = c.conn.Write(buf0)
} else {
@@ -397,7 +408,7 @@ func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error
return c.writeFatal(err)
}
if frameType == CloseMessage {
- c.writeFatal(ErrCloseSent)
+ _ = c.writeFatal(ErrCloseSent)
}
return nil
}
@@ -438,7 +449,7 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er
d := 1000 * time.Hour
if !deadline.IsZero() {
- d = deadline.Sub(time.Now())
+ d = time.Until(deadline)
if d < 0 {
return errWriteTimeout
}
@@ -460,13 +471,15 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er
return err
}
- c.conn.SetWriteDeadline(deadline)
+ if err := c.conn.SetWriteDeadline(deadline); err != nil {
+ return c.writeFatal(err)
+ }
_, err = c.conn.Write(buf)
if err != nil {
return c.writeFatal(err)
}
if messageType == CloseMessage {
- c.writeFatal(ErrCloseSent)
+ _ = c.writeFatal(ErrCloseSent)
}
return err
}
@@ -477,7 +490,9 @@ func (c *Conn) beginMessage(mw *messageWriter, messageType int) error {
// probably better to return an error in this situation, but we cannot
// change this without breaking existing applications.
if c.writer != nil {
- c.writer.Close()
+ if err := c.writer.Close(); err != nil {
+ log.Printf("websocket: discarding writer close error: %v", err)
+ }
c.writer = nil
}
@@ -630,7 +645,7 @@ func (w *messageWriter) flushFrame(final bool, extra []byte) error {
}
if final {
- w.endMessage(errWriteClosed)
+ _ = w.endMessage(errWriteClosed)
return nil
}
@@ -795,7 +810,7 @@ func (c *Conn) advanceFrame() (int, error) {
// 1. Skip remainder of previous frame.
if c.readRemaining > 0 {
- if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil {
+ if _, err := io.CopyN(io.Discard, c.br, c.readRemaining); err != nil {
return noFrame, err
}
}
@@ -817,7 +832,9 @@ func (c *Conn) advanceFrame() (int, error) {
rsv2 := p[0]&rsv2Bit != 0
rsv3 := p[0]&rsv3Bit != 0
mask := p[1]&maskBit != 0
- c.setReadRemaining(int64(p[1] & 0x7f))
+ if err := c.setReadRemaining(int64(p[1] & 0x7f)); err != nil {
+ return noFrame, err
+ }
c.readDecompress = false
if rsv1 {
@@ -922,7 +939,9 @@ func (c *Conn) advanceFrame() (int, error) {
}
if c.readLimit > 0 && c.readLength > c.readLimit {
- c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait))
+ if err := c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)); err != nil {
+ return noFrame, err
+ }
return noFrame, ErrReadLimit
}
@@ -934,7 +953,9 @@ func (c *Conn) advanceFrame() (int, error) {
var payload []byte
if c.readRemaining > 0 {
payload, err = c.read(int(c.readRemaining))
- c.setReadRemaining(0)
+ if err := c.setReadRemaining(0); err != nil {
+ return noFrame, err
+ }
if err != nil {
return noFrame, err
}
@@ -981,7 +1002,9 @@ func (c *Conn) handleProtocolError(message string) error {
if len(data) > maxControlFramePayloadSize {
data = data[:maxControlFramePayloadSize]
}
- c.WriteControl(CloseMessage, data, time.Now().Add(writeWait))
+ if err := c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)); err != nil {
+ return err
+ }
return errors.New("websocket: " + message)
}
@@ -998,7 +1021,9 @@ func (c *Conn) handleProtocolError(message string) error {
func (c *Conn) NextReader() (messageType int, r io.Reader, err error) {
// Close previous reader, only relevant for decompression.
if c.reader != nil {
- c.reader.Close()
+ if err := c.reader.Close(); err != nil {
+ log.Printf("websocket: discarding reader close error: %v", err)
+ }
c.reader = nil
}
@@ -1054,7 +1079,9 @@ func (r *messageReader) Read(b []byte) (int, error) {
}
rem := c.readRemaining
rem -= int64(n)
- c.setReadRemaining(rem)
+ if err := c.setReadRemaining(rem); err != nil {
+ return 0, err
+ }
if c.readRemaining > 0 && c.readErr == io.EOF {
c.readErr = errUnexpectedEOF
}
@@ -1094,7 +1121,7 @@ func (c *Conn) ReadMessage() (messageType int, p []byte, err error) {
if err != nil {
return messageType, nil, err
}
- p, err = ioutil.ReadAll(r)
+ p, err = io.ReadAll(r)
return messageType, p, err
}
@@ -1136,7 +1163,9 @@ func (c *Conn) SetCloseHandler(h func(code int, text string) error) {
if h == nil {
h = func(code int, text string) error {
message := FormatCloseMessage(code, "")
- c.WriteControl(CloseMessage, message, time.Now().Add(writeWait))
+ if err := c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)); err != nil {
+ return err
+ }
return nil
}
}
@@ -1161,7 +1190,7 @@ func (c *Conn) SetPingHandler(h func(appData string) error) {
err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait))
if err == ErrCloseSent {
return nil
- } else if e, ok := err.(net.Error); ok && e.Temporary() {
+ } else if _, ok := err.(net.Error); ok {
return nil
}
return err
@@ -1189,8 +1218,16 @@ func (c *Conn) SetPongHandler(h func(appData string) error) {
c.handlePong = h
}
+// NetConn returns the underlying connection that is wrapped by c.
+// Note that writing to or reading from this connection directly will corrupt the
+// WebSocket connection.
+func (c *Conn) NetConn() net.Conn {
+ return c.conn
+}
+
// UnderlyingConn returns the internal net.Conn. This can be used to further
// modifications to connection specific flags.
+// Deprecated: Use the NetConn method.
func (c *Conn) UnderlyingConn() net.Conn {
return c.conn
}
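
A hedged sketch of the new accessor from the caller's side; the dial URL is a placeholder and no such endpoint is implied by this change:

```go
package main

import (
	"fmt"
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	c, _, err := websocket.DefaultDialer.Dial("ws://localhost:8080/ws", nil) // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// NetConn exposes the wrapped net.Conn for inspection (peer addresses,
	// TLS state via type assertion, ...). Reading or writing on it directly
	// would corrupt the WebSocket framing, as the doc comment warns.
	nc := c.NetConn()
	fmt.Println("local:", nc.LocalAddr(), "remote:", nc.RemoteAddr())
}
```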
diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go
index d0742bf2..67d0968b 100644
--- a/vendor/github.com/gorilla/websocket/mask.go
+++ b/vendor/github.com/gorilla/websocket/mask.go
@@ -9,6 +9,7 @@ package websocket
import "unsafe"
+// #nosec G103 -- (CWE-242) Has been audited
const wordSize = int(unsafe.Sizeof(uintptr(0)))
func maskBytes(key [4]byte, pos int, b []byte) int {
@@ -22,6 +23,7 @@ func maskBytes(key [4]byte, pos int, b []byte) int {
}
// Mask one byte at a time to word boundary.
+ //#nosec G103 -- (CWE-242) Has been audited
if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
n = wordSize - n
for i := range b[:n] {
@@ -36,11 +38,13 @@ func maskBytes(key [4]byte, pos int, b []byte) int {
for i := range k {
k[i] = key[(pos+i)&3]
}
+ //#nosec G103 -- (CWE-242) Has been audited
kw := *(*uintptr)(unsafe.Pointer(&k))
// Mask one word at a time.
n := (len(b) / wordSize) * wordSize
for i := 0; i < n; i += wordSize {
+ //#nosec G103 -- (CWE-242) Has been audited
*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
}
diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go
index e0f466b7..80f55d1e 100644
--- a/vendor/github.com/gorilla/websocket/proxy.go
+++ b/vendor/github.com/gorilla/websocket/proxy.go
@@ -8,10 +8,13 @@ import (
"bufio"
"encoding/base64"
"errors"
+ "log"
"net"
"net/http"
"net/url"
"strings"
+
+ "golang.org/x/net/proxy"
)
type netDialerFunc func(network, addr string) (net.Conn, error)
@@ -21,7 +24,7 @@ func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
}
func init() {
- proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
+ proxy.RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy.Dialer) (proxy.Dialer, error) {
return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil
})
}
@@ -55,7 +58,9 @@ func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error)
}
if err := connectReq.Write(conn); err != nil {
- conn.Close()
+ if err := conn.Close(); err != nil {
+ log.Printf("httpProxyDialer: failed to close connection: %v", err)
+ }
return nil, err
}
@@ -64,12 +69,16 @@ func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error)
br := bufio.NewReader(conn)
resp, err := http.ReadResponse(br, connectReq)
if err != nil {
- conn.Close()
+ if err := conn.Close(); err != nil {
+ log.Printf("httpProxyDialer: failed to close connection: %v", err)
+ }
return nil, err
}
if resp.StatusCode != 200 {
- conn.Close()
+ if err := conn.Close(); err != nil {
+ log.Printf("httpProxyDialer: failed to close connection: %v", err)
+ }
f := strings.SplitN(resp.Status, " ", 2)
return nil, errors.New(f[1])
}
diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go
index 24d53b38..1e720e1d 100644
--- a/vendor/github.com/gorilla/websocket/server.go
+++ b/vendor/github.com/gorilla/websocket/server.go
@@ -8,6 +8,7 @@ import (
"bufio"
"errors"
"io"
+ "log"
"net/http"
"net/url"
"strings"
@@ -154,8 +155,8 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade
}
challengeKey := r.Header.Get("Sec-Websocket-Key")
- if challengeKey == "" {
- return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank")
+ if !isValidChallengeKey(challengeKey) {
+ return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header must be Base64 encoded value of 16-byte in length")
}
subprotocol := u.selectSubprotocol(r, responseHeader)
@@ -183,7 +184,9 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade
}
if brw.Reader.Buffered() > 0 {
- netConn.Close()
+ if err := netConn.Close(); err != nil {
+ log.Printf("websocket: failed to close network connection: %v", err)
+ }
return nil, errors.New("websocket: client sent data before handshake is complete")
}
@@ -248,17 +251,34 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade
p = append(p, "\r\n"...)
// Clear deadlines set by HTTP server.
- netConn.SetDeadline(time.Time{})
+ if err := netConn.SetDeadline(time.Time{}); err != nil {
+ if err := netConn.Close(); err != nil {
+ log.Printf("websocket: failed to close network connection: %v", err)
+ }
+ return nil, err
+ }
if u.HandshakeTimeout > 0 {
- netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
+ if err := netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)); err != nil {
+ if err := netConn.Close(); err != nil {
+ log.Printf("websocket: failed to close network connection: %v", err)
+ }
+ return nil, err
+ }
}
if _, err = netConn.Write(p); err != nil {
- netConn.Close()
+ if err := netConn.Close(); err != nil {
+ log.Printf("websocket: failed to close network connection: %v", err)
+ }
return nil, err
}
if u.HandshakeTimeout > 0 {
- netConn.SetWriteDeadline(time.Time{})
+ if err := netConn.SetWriteDeadline(time.Time{}); err != nil {
+ if err := netConn.Close(); err != nil {
+ log.Printf("websocket: failed to close network connection: %v", err)
+ }
+ return nil, err
+ }
}
return c, nil
@@ -356,8 +376,12 @@ func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
// bufio.Writer's underlying writer.
var wh writeHook
bw.Reset(&wh)
- bw.WriteByte(0)
- bw.Flush()
+ if err := bw.WriteByte(0); err != nil {
+ panic(err)
+ }
+ if err := bw.Flush(); err != nil {
+ log.Printf("websocket: bufioWriterBuffer: Flush: %v", err)
+ }
bw.Reset(originalWriter)
diff --git a/vendor/github.com/gorilla/websocket/tls_handshake.go b/vendor/github.com/gorilla/websocket/tls_handshake.go
index a62b68cc..7f386453 100644
--- a/vendor/github.com/gorilla/websocket/tls_handshake.go
+++ b/vendor/github.com/gorilla/websocket/tls_handshake.go
@@ -1,6 +1,3 @@
-//go:build go1.17
-// +build go1.17
-
package websocket
import (
diff --git a/vendor/github.com/gorilla/websocket/tls_handshake_116.go b/vendor/github.com/gorilla/websocket/tls_handshake_116.go
deleted file mode 100644
index e1b2b44f..00000000
--- a/vendor/github.com/gorilla/websocket/tls_handshake_116.go
+++ /dev/null
@@ -1,21 +0,0 @@
-//go:build !go1.17
-// +build !go1.17
-
-package websocket
-
-import (
- "context"
- "crypto/tls"
-)
-
-func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error {
- if err := tlsConn.Handshake(); err != nil {
- return err
- }
- if !cfg.InsecureSkipVerify {
- if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go
index 7bf2f66c..9b1a629b 100644
--- a/vendor/github.com/gorilla/websocket/util.go
+++ b/vendor/github.com/gorilla/websocket/util.go
@@ -6,7 +6,7 @@ package websocket
import (
"crypto/rand"
- "crypto/sha1"
+ "crypto/sha1" //#nosec G505 -- (CWE-327) https://datatracker.ietf.org/doc/html/rfc6455#page-54
"encoding/base64"
"io"
"net/http"
@@ -17,7 +17,7 @@ import (
var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
func computeAcceptKey(challengeKey string) string {
- h := sha1.New()
+ h := sha1.New() //#nosec G401 -- (CWE-326) https://datatracker.ietf.org/doc/html/rfc6455#page-54
h.Write([]byte(challengeKey))
h.Write(keyGUID)
return base64.StdEncoding.EncodeToString(h.Sum(nil))
@@ -281,3 +281,18 @@ headers:
}
return result
}
+
+// isValidChallengeKey checks if the argument meets RFC6455 specification.
+func isValidChallengeKey(s string) bool {
+ // From RFC6455:
+ //
+ // A |Sec-WebSocket-Key| header field with a base64-encoded (see
+ // Section 4 of [RFC4648]) value that, when decoded, is 16 bytes in
+ // length.
+
+ if s == "" {
+ return false
+ }
+ decoded, err := base64.StdEncoding.DecodeString(s)
+ return err == nil && len(decoded) == 16
+}
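
For context, a client key that satisfies this check is simply the base64 encoding of 16 random bytes; a minimal sketch (not library code) of producing one:

```go
package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

func main() {
	var raw [16]byte
	if _, err := rand.Read(raw[:]); err != nil {
		panic(err)
	}
	key := base64.StdEncoding.EncodeToString(raw[:])

	// The server-side check decodes the key and insists on exactly 16 bytes.
	decoded, _ := base64.StdEncoding.DecodeString(key)
	fmt.Println(key, "decodes to", len(decoded), "bytes")
}
```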
diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go
deleted file mode 100644
index 2e668f6b..00000000
--- a/vendor/github.com/gorilla/websocket/x_net_proxy.go
+++ /dev/null
@@ -1,473 +0,0 @@
-// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
-//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy
-
-// Package proxy provides support for a variety of protocols to proxy network
-// data.
-//
-
-package websocket
-
-import (
- "errors"
- "io"
- "net"
- "net/url"
- "os"
- "strconv"
- "strings"
- "sync"
-)
-
-type proxy_direct struct{}
-
-// Direct is a direct proxy: one that makes network connections directly.
-var proxy_Direct = proxy_direct{}
-
-func (proxy_direct) Dial(network, addr string) (net.Conn, error) {
- return net.Dial(network, addr)
-}
-
-// A PerHost directs connections to a default Dialer unless the host name
-// requested matches one of a number of exceptions.
-type proxy_PerHost struct {
- def, bypass proxy_Dialer
-
- bypassNetworks []*net.IPNet
- bypassIPs []net.IP
- bypassZones []string
- bypassHosts []string
-}
-
-// NewPerHost returns a PerHost Dialer that directs connections to either
-// defaultDialer or bypass, depending on whether the connection matches one of
-// the configured rules.
-func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost {
- return &proxy_PerHost{
- def: defaultDialer,
- bypass: bypass,
- }
-}
-
-// Dial connects to the address addr on the given network through either
-// defaultDialer or bypass.
-func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) {
- host, _, err := net.SplitHostPort(addr)
- if err != nil {
- return nil, err
- }
-
- return p.dialerForRequest(host).Dial(network, addr)
-}
-
-func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer {
- if ip := net.ParseIP(host); ip != nil {
- for _, net := range p.bypassNetworks {
- if net.Contains(ip) {
- return p.bypass
- }
- }
- for _, bypassIP := range p.bypassIPs {
- if bypassIP.Equal(ip) {
- return p.bypass
- }
- }
- return p.def
- }
-
- for _, zone := range p.bypassZones {
- if strings.HasSuffix(host, zone) {
- return p.bypass
- }
- if host == zone[1:] {
- // For a zone ".example.com", we match "example.com"
- // too.
- return p.bypass
- }
- }
- for _, bypassHost := range p.bypassHosts {
- if bypassHost == host {
- return p.bypass
- }
- }
- return p.def
-}
-
-// AddFromString parses a string that contains comma-separated values
-// specifying hosts that should use the bypass proxy. Each value is either an
-// IP address, a CIDR range, a zone (*.example.com) or a host name
-// (localhost). A best effort is made to parse the string and errors are
-// ignored.
-func (p *proxy_PerHost) AddFromString(s string) {
- hosts := strings.Split(s, ",")
- for _, host := range hosts {
- host = strings.TrimSpace(host)
- if len(host) == 0 {
- continue
- }
- if strings.Contains(host, "/") {
- // We assume that it's a CIDR address like 127.0.0.0/8
- if _, net, err := net.ParseCIDR(host); err == nil {
- p.AddNetwork(net)
- }
- continue
- }
- if ip := net.ParseIP(host); ip != nil {
- p.AddIP(ip)
- continue
- }
- if strings.HasPrefix(host, "*.") {
- p.AddZone(host[1:])
- continue
- }
- p.AddHost(host)
- }
-}
-
-// AddIP specifies an IP address that will use the bypass proxy. Note that
-// this will only take effect if a literal IP address is dialed. A connection
-// to a named host will never match an IP.
-func (p *proxy_PerHost) AddIP(ip net.IP) {
- p.bypassIPs = append(p.bypassIPs, ip)
-}
-
-// AddNetwork specifies an IP range that will use the bypass proxy. Note that
-// this will only take effect if a literal IP address is dialed. A connection
-// to a named host will never match.
-func (p *proxy_PerHost) AddNetwork(net *net.IPNet) {
- p.bypassNetworks = append(p.bypassNetworks, net)
-}
-
-// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
-// "example.com" matches "example.com" and all of its subdomains.
-func (p *proxy_PerHost) AddZone(zone string) {
- if strings.HasSuffix(zone, ".") {
- zone = zone[:len(zone)-1]
- }
- if !strings.HasPrefix(zone, ".") {
- zone = "." + zone
- }
- p.bypassZones = append(p.bypassZones, zone)
-}
-
-// AddHost specifies a host name that will use the bypass proxy.
-func (p *proxy_PerHost) AddHost(host string) {
- if strings.HasSuffix(host, ".") {
- host = host[:len(host)-1]
- }
- p.bypassHosts = append(p.bypassHosts, host)
-}
-
-// A Dialer is a means to establish a connection.
-type proxy_Dialer interface {
- // Dial connects to the given address via the proxy.
- Dial(network, addr string) (c net.Conn, err error)
-}
-
-// Auth contains authentication parameters that specific Dialers may require.
-type proxy_Auth struct {
- User, Password string
-}
-
-// FromEnvironment returns the dialer specified by the proxy related variables in
-// the environment.
-func proxy_FromEnvironment() proxy_Dialer {
- allProxy := proxy_allProxyEnv.Get()
- if len(allProxy) == 0 {
- return proxy_Direct
- }
-
- proxyURL, err := url.Parse(allProxy)
- if err != nil {
- return proxy_Direct
- }
- proxy, err := proxy_FromURL(proxyURL, proxy_Direct)
- if err != nil {
- return proxy_Direct
- }
-
- noProxy := proxy_noProxyEnv.Get()
- if len(noProxy) == 0 {
- return proxy
- }
-
- perHost := proxy_NewPerHost(proxy, proxy_Direct)
- perHost.AddFromString(noProxy)
- return perHost
-}
-
-// proxySchemes is a map from URL schemes to a function that creates a Dialer
-// from a URL with such a scheme.
-var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)
-
-// RegisterDialerType takes a URL scheme and a function to generate Dialers from
-// a URL with that scheme and a forwarding Dialer. Registered schemes are used
-// by FromURL.
-func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) {
- if proxy_proxySchemes == nil {
- proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error))
- }
- proxy_proxySchemes[scheme] = f
-}
-
-// FromURL returns a Dialer given a URL specification and an underlying
-// Dialer for it to make network requests.
-func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) {
- var auth *proxy_Auth
- if u.User != nil {
- auth = new(proxy_Auth)
- auth.User = u.User.Username()
- if p, ok := u.User.Password(); ok {
- auth.Password = p
- }
- }
-
- switch u.Scheme {
- case "socks5":
- return proxy_SOCKS5("tcp", u.Host, auth, forward)
- }
-
- // If the scheme doesn't match any of the built-in schemes, see if it
- // was registered by another package.
- if proxy_proxySchemes != nil {
- if f, ok := proxy_proxySchemes[u.Scheme]; ok {
- return f(u, forward)
- }
- }
-
- return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
-}
-
-var (
- proxy_allProxyEnv = &proxy_envOnce{
- names: []string{"ALL_PROXY", "all_proxy"},
- }
- proxy_noProxyEnv = &proxy_envOnce{
- names: []string{"NO_PROXY", "no_proxy"},
- }
-)
-
-// envOnce looks up an environment variable (optionally by multiple
-// names) once. It mitigates expensive lookups on some platforms
-// (e.g. Windows).
-// (Borrowed from net/http/transport.go)
-type proxy_envOnce struct {
- names []string
- once sync.Once
- val string
-}
-
-func (e *proxy_envOnce) Get() string {
- e.once.Do(e.init)
- return e.val
-}
-
-func (e *proxy_envOnce) init() {
- for _, n := range e.names {
- e.val = os.Getenv(n)
- if e.val != "" {
- return
- }
- }
-}
-
-// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
-// with an optional username and password. See RFC 1928 and RFC 1929.
-func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) {
- s := &proxy_socks5{
- network: network,
- addr: addr,
- forward: forward,
- }
- if auth != nil {
- s.user = auth.User
- s.password = auth.Password
- }
-
- return s, nil
-}
-
-type proxy_socks5 struct {
- user, password string
- network, addr string
- forward proxy_Dialer
-}
-
-const proxy_socks5Version = 5
-
-const (
- proxy_socks5AuthNone = 0
- proxy_socks5AuthPassword = 2
-)
-
-const proxy_socks5Connect = 1
-
-const (
- proxy_socks5IP4 = 1
- proxy_socks5Domain = 3
- proxy_socks5IP6 = 4
-)
-
-var proxy_socks5Errors = []string{
- "",
- "general failure",
- "connection forbidden",
- "network unreachable",
- "host unreachable",
- "connection refused",
- "TTL expired",
- "command not supported",
- "address type not supported",
-}
-
-// Dial connects to the address addr on the given network via the SOCKS5 proxy.
-func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) {
- switch network {
- case "tcp", "tcp6", "tcp4":
- default:
- return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
- }
-
- conn, err := s.forward.Dial(s.network, s.addr)
- if err != nil {
- return nil, err
- }
- if err := s.connect(conn, addr); err != nil {
- conn.Close()
- return nil, err
- }
- return conn, nil
-}
-
-// connect takes an existing connection to a socks5 proxy server,
-// and commands the server to extend that connection to target,
-// which must be a canonical address with a host and port.
-func (s *proxy_socks5) connect(conn net.Conn, target string) error {
- host, portStr, err := net.SplitHostPort(target)
- if err != nil {
- return err
- }
-
- port, err := strconv.Atoi(portStr)
- if err != nil {
- return errors.New("proxy: failed to parse port number: " + portStr)
- }
- if port < 1 || port > 0xffff {
- return errors.New("proxy: port number out of range: " + portStr)
- }
-
- // the size here is just an estimate
- buf := make([]byte, 0, 6+len(host))
-
- buf = append(buf, proxy_socks5Version)
- if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
- buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword)
- } else {
- buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone)
- }
-
- if _, err := conn.Write(buf); err != nil {
- return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- if _, err := io.ReadFull(conn, buf[:2]); err != nil {
- return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
- if buf[0] != 5 {
- return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
- }
- if buf[1] == 0xff {
- return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
- }
-
- // See RFC 1929
- if buf[1] == proxy_socks5AuthPassword {
- buf = buf[:0]
- buf = append(buf, 1 /* password protocol version */)
- buf = append(buf, uint8(len(s.user)))
- buf = append(buf, s.user...)
- buf = append(buf, uint8(len(s.password)))
- buf = append(buf, s.password...)
-
- if _, err := conn.Write(buf); err != nil {
- return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- if _, err := io.ReadFull(conn, buf[:2]); err != nil {
- return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- if buf[1] != 0 {
- return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
- }
- }
-
- buf = buf[:0]
- buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */)
-
- if ip := net.ParseIP(host); ip != nil {
- if ip4 := ip.To4(); ip4 != nil {
- buf = append(buf, proxy_socks5IP4)
- ip = ip4
- } else {
- buf = append(buf, proxy_socks5IP6)
- }
- buf = append(buf, ip...)
- } else {
- if len(host) > 255 {
- return errors.New("proxy: destination host name too long: " + host)
- }
- buf = append(buf, proxy_socks5Domain)
- buf = append(buf, byte(len(host)))
- buf = append(buf, host...)
- }
- buf = append(buf, byte(port>>8), byte(port))
-
- if _, err := conn.Write(buf); err != nil {
- return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- if _, err := io.ReadFull(conn, buf[:4]); err != nil {
- return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- failure := "unknown error"
- if int(buf[1]) < len(proxy_socks5Errors) {
- failure = proxy_socks5Errors[buf[1]]
- }
-
- if len(failure) > 0 {
- return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
- }
-
- bytesToDiscard := 0
- switch buf[3] {
- case proxy_socks5IP4:
- bytesToDiscard = net.IPv4len
- case proxy_socks5IP6:
- bytesToDiscard = net.IPv6len
- case proxy_socks5Domain:
- _, err := io.ReadFull(conn, buf[:1])
- if err != nil {
- return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
- bytesToDiscard = int(buf[0])
- default:
- return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
- }
-
- if cap(buf) < bytesToDiscard {
- buf = make([]byte, bytesToDiscard)
- } else {
- buf = buf[:bytesToDiscard]
- }
- if _, err := io.ReadFull(conn, buf); err != nil {
- return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- // Also need to discard the port number
- if _, err := io.ReadFull(conn, buf[:2]); err != nil {
- return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- return nil
-}
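Editorial note: the deletions above drop a file-local copy of what appears to be the golang.org/x/net/proxy SOCKS5 and per-host bypass logic (identifiers prefixed with `proxy_`). As a hedged reference only — an assumption about equivalent behaviour, not part of this diff — the same environment-driven and explicit SOCKS5 dialing can be expressed with the public `golang.org/x/net/proxy` package:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/proxy"
)

func main() {
	// FromEnvironment honours ALL_PROXY / NO_PROXY, mirroring the
	// proxy_FromEnvironment helper removed above.
	dialer := proxy.FromEnvironment()

	// Alternatively, build a SOCKS5 dialer explicitly; the address and
	// credentials below are placeholders.
	if socks, err := proxy.SOCKS5("tcp", "127.0.0.1:1080",
		&proxy.Auth{User: "user", Password: "secret"}, proxy.Direct); err == nil {
		dialer = socks
	} else {
		log.Fatal(err)
	}

	conn, err := dialer.Dial("tcp", "example.com:443")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	fmt.Println("connected via", conn.RemoteAddr())
}
```

The public package also exposes `RegisterDialerType` for custom URL schemes, corresponding to the registration hook deleted above.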
diff --git a/vendor/github.com/graph-gophers/graphql-go/.gitignore b/vendor/github.com/graph-gophers/graphql-go/.gitignore
deleted file mode 100644
index 2fa95abe..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-/.idea
-/.vscode
-/internal/validation/testdata/graphql-js
-/internal/validation/testdata/node_modules
-/vendor
diff --git a/vendor/github.com/graph-gophers/graphql-go/.golangci.yml b/vendor/github.com/graph-gophers/graphql-go/.golangci.yml
deleted file mode 100644
index c6741d58..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/.golangci.yml
+++ /dev/null
@@ -1,35 +0,0 @@
-run:
- timeout: 5m
-
-linters-settings:
- gofmt:
- simplify: true
- govet:
- check-shadowing: true
- enable-all: true
- disable:
- - fieldalignment
- - deepequalerrors # remove later
-
-linters:
- disable-all: true
- enable:
- - deadcode
- - gofmt
- - gosimple
- - govet
- - ineffassign
- - exportloopref
- - structcheck
- - staticcheck
- - unconvert
- - unused
- - varcheck
- - misspell
- - goimports
-
-issues:
- exclude-rules:
- - linters:
- - unused
- path: "graphql_test.go"
\ No newline at end of file
diff --git a/vendor/github.com/graph-gophers/graphql-go/CHANGELOG.md b/vendor/github.com/graph-gophers/graphql-go/CHANGELOG.md
deleted file mode 100644
index e5f48c06..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/CHANGELOG.md
+++ /dev/null
@@ -1,10 +0,0 @@
-CHANGELOG
-
-[v1.1.0](https://github.com/graph-gophers/graphql-go/releases/tag/v1.1.0) Release v1.1.0
-* [FEATURE] Add types package #437
-* [FEATURE] Expose `packer.Unmarshaler` as `decode.Unmarshaler` to the public #450
-* [FEATURE] Add location fields to type definitions #454
-* [FEATURE] `errors.Errorf` preserves original error similar to `fmt.Errorf` #456
-* [BUGFIX] Fix duplicated __typename in response (fixes #369) #443
-
-[v1.0.0](https://github.com/graph-gophers/graphql-go/releases/tag/v1.0.0) Initial release
diff --git a/vendor/github.com/graph-gophers/graphql-go/CONTRIBUTING.md b/vendor/github.com/graph-gophers/graphql-go/CONTRIBUTING.md
deleted file mode 100644
index a2cffca8..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/CONTRIBUTING.md
+++ /dev/null
@@ -1,13 +0,0 @@
-## Contributing
-
-- With issues:
- - Use the search tool before opening a new issue.
- - Please provide source code and commit sha if you found a bug.
- - Review existing issues and provide feedback or react to them.
-
-- With pull requests:
- - Open your pull request against `master`
- - Your pull request should have no more than two commits, if not you should squash them.
- - It should pass all tests in the available continuous integrations systems such as TravisCI.
- - You should add/modify tests to cover your proposed code changes.
- - If your pull request contains a new feature, please document it on the README.
\ No newline at end of file
diff --git a/vendor/github.com/graph-gophers/graphql-go/LICENSE b/vendor/github.com/graph-gophers/graphql-go/LICENSE
deleted file mode 100644
index 3907ceca..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/LICENSE
+++ /dev/null
@@ -1,24 +0,0 @@
-Copyright (c) 2016 Richard Musiol. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/graph-gophers/graphql-go/README.md b/vendor/github.com/graph-gophers/graphql-go/README.md
deleted file mode 100644
index 87b020c1..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/README.md
+++ /dev/null
@@ -1,169 +0,0 @@
-# graphql-go [![Sourcegraph](https://sourcegraph.com/github.com/graph-gophers/graphql-go/-/badge.svg)](https://sourcegraph.com/github.com/graph-gophers/graphql-go?badge) [![Build Status](https://graph-gophers.semaphoreci.com/badges/graphql-go/branches/master.svg?style=shields)](https://graph-gophers.semaphoreci.com/projects/graphql-go) [![GoDoc](https://godoc.org/github.com/graph-gophers/graphql-go?status.svg)](https://godoc.org/github.com/graph-gophers/graphql-go)
-
-
-
-The goal of this project is to provide full support of the [GraphQL draft specification](https://facebook.github.io/graphql/draft) with a set of idiomatic, easy to use Go packages.
-
-While still under heavy development (`internal` APIs are almost certainly subject to change), this library is
-safe for production use.
-
-## Features
-
-- minimal API
-- support for `context.Context`
-- support for the `OpenTracing` standard
-- schema type-checking against resolvers
-- resolvers are matched to the schema based on method sets (can resolve a GraphQL schema with a Go interface or Go struct).
-- handles panics in resolvers
-- parallel execution of resolvers
-- subscriptions
- - [sample WS transport](https://github.com/graph-gophers/graphql-transport-ws)
-
-## Roadmap
-
-We're trying out the GitHub Project feature to manage `graphql-go`'s [development roadmap](https://github.com/graph-gophers/graphql-go/projects/1).
-Feedback is welcome and appreciated.
-
-## (Some) Documentation
-
-### Basic Sample
-
-```go
-package main
-
-import (
- "log"
- "net/http"
-
- graphql "github.com/graph-gophers/graphql-go"
- "github.com/graph-gophers/graphql-go/relay"
-)
-
-type query struct{}
-
-func (_ *query) Hello() string { return "Hello, world!" }
-
-func main() {
- s := `
- type Query {
- hello: String!
- }
- `
- schema := graphql.MustParseSchema(s, &query{})
- http.Handle("/query", &relay.Handler{Schema: schema})
- log.Fatal(http.ListenAndServe(":8080", nil))
-}
-```
-
-To test:
-
-```sh
-curl -XPOST -d '{"query": "{ hello }"}' localhost:8080/query
-```
-
-### Resolvers
-
-A resolver must have one method or field for each field of the GraphQL type it resolves. The method or field name has to be [exported](https://golang.org/ref/spec#Exported_identifiers) and match the schema's field's name in a non-case-sensitive way.
-You can use struct fields as resolvers by using `SchemaOpt: UseFieldResolvers()`. For example,
-```
-opts := []graphql.SchemaOpt{graphql.UseFieldResolvers()}
-schema := graphql.MustParseSchema(s, &query{}, opts...)
-```
-
-When using `UseFieldResolvers` schema option, a struct field will be used *only* when:
-- there is no method for a struct field
-- a struct field does not implement an interface method
-- a struct field does not have arguments
-
-The method has up to two arguments:
-
-- Optional `context.Context` argument.
-- Mandatory `*struct { ... }` argument if the corresponding GraphQL field has arguments. The names of the struct fields have to be [exported](https://golang.org/ref/spec#Exported_identifiers) and have to match the names of the GraphQL arguments in a non-case-sensitive way.
-
-The method has up to two results:
-
-- The GraphQL field's value as determined by the resolver.
-- Optional `error` result.
-
-Example for a simple resolver method:
-
-```go
-func (r *helloWorldResolver) Hello() string {
- return "Hello world!"
-}
-```
-
-The following signature is also allowed:
-
-```go
-func (r *helloWorldResolver) Hello(ctx context.Context) (string, error) {
- return "Hello world!", nil
-}
-```
-
-### Schema Options
-
-- `UseStringDescriptions()` enables the usage of double quoted and triple quoted strings as descriptions. When this is not enabled, comments are parsed as descriptions instead.
-- `UseFieldResolvers()` specifies whether to use struct field resolvers.
-- `MaxDepth(n int)` specifies the maximum field nesting depth in a query. The default is 0 which disables max depth checking.
-- `MaxParallelism(n int)` specifies the maximum number of resolvers per request allowed to run in parallel. The default is 10.
-- `Tracer(tracer trace.Tracer)` is used to trace queries and fields. It defaults to `trace.OpenTracingTracer`.
-- `ValidationTracer(tracer trace.ValidationTracer)` is used to trace validation errors. It defaults to `trace.NoopValidationTracer`.
-- `Logger(logger log.Logger)` is used to log panics during query execution. It defaults to `exec.DefaultLogger`.
-- `PanicHandler(panicHandler errors.PanicHandler)` is used to transform panics into errors during query execution. It defaults to `errors.DefaultPanicHandler`.
-- `DisableIntrospection()` disables introspection queries.
-
-### Custom Errors
-
-Errors returned by resolvers can include custom extensions by implementing the `ResolverError` interface:
-
-```go
-type ResolverError interface {
- error
- Extensions() map[string]interface{}
-}
-```
-
-Example of a simple custom error:
-
-```go
-type droidNotFoundError struct {
- Code string `json:"code"`
- Message string `json:"message"`
-}
-
-func (e droidNotFoundError) Error() string {
- return fmt.Sprintf("error [%s]: %s", e.Code, e.Message)
-}
-
-func (e droidNotFoundError) Extensions() map[string]interface{} {
- return map[string]interface{}{
- "code": e.Code,
- "message": e.Message,
- }
-}
-```
-
-Which could produce a GraphQL error such as:
-
-```go
-{
- "errors": [
- {
- "message": "error [NotFound]: This is not the droid you are looking for",
- "path": [
- "droid"
- ],
- "extensions": {
- "code": "NotFound",
- "message": "This is not the droid you are looking for"
- }
- }
- ],
- "data": null
-}
-```
-
-### [Examples](https://github.com/graph-gophers/graphql-go/wiki/Examples)
-
-### [Companies that use this library](https://github.com/graph-gophers/graphql-go/wiki/Users)
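Editorial note: the removed README only shows argument-less resolvers. As a complement, a hedged sketch of a resolver that takes a GraphQL argument via the args struct rules described above (the `greet` field and type names are hypothetical):

```go
package main

import (
	"context"
	"log"
	"net/http"

	graphql "github.com/graph-gophers/graphql-go"
	"github.com/graph-gophers/graphql-go/relay"
)

const schemaString = `
	type Query {
		greet(name: String!): String!
	}
`

type query struct{}

// Greet matches the "greet" field: an optional context.Context first, then a
// struct whose exported fields match the GraphQL arguments case-insensitively,
// and an optional error result.
func (*query) Greet(ctx context.Context, args struct{ Name string }) (string, error) {
	return "Hello, " + args.Name + "!", nil
}

func main() {
	schema := graphql.MustParseSchema(schemaString, &query{})
	http.Handle("/query", &relay.Handler{Schema: schema})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```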
diff --git a/vendor/github.com/graph-gophers/graphql-go/decode/decode.go b/vendor/github.com/graph-gophers/graphql-go/decode/decode.go
deleted file mode 100644
index 56a9d5b5..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/decode/decode.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package decode
-
-// Unmarshaler defines the api of Go types mapped to custom GraphQL scalar types
-type Unmarshaler interface {
- // ImplementsGraphQLType maps the implementing custom Go type
- // to the GraphQL scalar type in the schema.
- ImplementsGraphQLType(name string) bool
- // UnmarshalGraphQL is the custom unmarshaler for the implementing type
- //
- // This function will be called whenever you use the
- // custom GraphQL scalar type as an input
- UnmarshalGraphQL(input interface{}) error
-}
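Editorial note: a hedged sketch of a custom scalar implementing the Unmarshaler contract deleted above (the `Timestamp` type, package name, and the `Time` scalar name are hypothetical):

```go
package scalars

import (
	"fmt"
	"time"
)

// Timestamp maps a hypothetical GraphQL "Time" scalar onto time.Time.
type Timestamp struct{ time.Time }

// ImplementsGraphQLType ties this Go type to the schema's scalar name.
func (Timestamp) ImplementsGraphQLType(name string) bool { return name == "Time" }

// UnmarshalGraphQL is called whenever the scalar is used as an input value.
func (t *Timestamp) UnmarshalGraphQL(input interface{}) error {
	s, ok := input.(string)
	if !ok {
		return fmt.Errorf("wrong type for Time: %T", input)
	}
	parsed, err := time.Parse(time.RFC3339, s)
	if err != nil {
		return err
	}
	t.Time = parsed
	return nil
}
```

A type like this can then appear in resolver argument structs wherever the schema declares `scalar Time`.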
diff --git a/vendor/github.com/graph-gophers/graphql-go/errors/errors.go b/vendor/github.com/graph-gophers/graphql-go/errors/errors.go
deleted file mode 100644
index 0f9340b1..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/errors/errors.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package errors
-
-import (
- "fmt"
-)
-
-type QueryError struct {
- Err error `json:"-"` // Err holds underlying if available
- Message string `json:"message"`
- Locations []Location `json:"locations,omitempty"`
- Path []interface{} `json:"path,omitempty"`
- Rule string `json:"-"`
- ResolverError error `json:"-"`
- Extensions map[string]interface{} `json:"extensions,omitempty"`
-}
-
-type Location struct {
- Line int `json:"line"`
- Column int `json:"column"`
-}
-
-func (a Location) Before(b Location) bool {
- return a.Line < b.Line || (a.Line == b.Line && a.Column < b.Column)
-}
-
-func Errorf(format string, a ...interface{}) *QueryError {
- // similar to fmt.Errorf, Errorf will wrap the last argument if it is an instance of error
- var err error
- if n := len(a); n > 0 {
- if v, ok := a[n-1].(error); ok {
- err = v
- }
- }
-
- return &QueryError{
- Err: err,
- Message: fmt.Sprintf(format, a...),
- }
-}
-
-func (err *QueryError) Error() string {
- if err == nil {
- return ""
- }
- str := fmt.Sprintf("graphql: %s", err.Message)
- for _, loc := range err.Locations {
- str += fmt.Sprintf(" (line %d, column %d)", loc.Line, loc.Column)
- }
- return str
-}
-
-func (err *QueryError) Unwrap() error {
- if err == nil {
- return nil
- }
- return err.Err
-}
-
-var _ error = &QueryError{}
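Editorial note: as the comment in Errorf above states, a trailing error argument is preserved in QueryError.Err and surfaced through Unwrap, so the standard library error helpers can see through it. A small illustrative sketch (the `gqlerrors` import alias is an assumption):

```go
package main

import (
	"errors"
	"fmt"
	"io"

	gqlerrors "github.com/graph-gophers/graphql-go/errors"
)

func main() {
	// The trailing error argument becomes QueryError.Err...
	qerr := gqlerrors.Errorf("reading upload: %s", io.ErrUnexpectedEOF)

	// ...so errors.Is can reach it via the Unwrap method defined above.
	fmt.Println(errors.Is(qerr, io.ErrUnexpectedEOF)) // true
}
```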
diff --git a/vendor/github.com/graph-gophers/graphql-go/errors/panic_handler.go b/vendor/github.com/graph-gophers/graphql-go/errors/panic_handler.go
deleted file mode 100644
index 5446c2a9..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/errors/panic_handler.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package errors
-
-import (
- "context"
-)
-
-// PanicHandler is the interface used to create custom panic errors that occur during query execution
-type PanicHandler interface {
- MakePanicError(ctx context.Context, value interface{}) *QueryError
-}
-
-// DefaultPanicHandler is the default PanicHandler
-type DefaultPanicHandler struct{}
-
-// MakePanicError creates a new QueryError from a panic that occurred during execution
-func (h *DefaultPanicHandler) MakePanicError(ctx context.Context, value interface{}) *QueryError {
- return Errorf("panic occurred: %v", value)
-}
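Editorial note: a custom PanicHandler can replace DefaultPanicHandler to keep panic details out of client-facing responses; it is wired in through the PanicHandler schema option (see graphql.go below). A hedged sketch — the `redactingPanicHandler` name and schema are hypothetical:

```go
package main

import (
	"context"

	graphql "github.com/graph-gophers/graphql-go"
	gqlerrors "github.com/graph-gophers/graphql-go/errors"
)

// redactingPanicHandler returns a generic QueryError instead of exposing the
// recovered panic value to clients.
type redactingPanicHandler struct{}

func (redactingPanicHandler) MakePanicError(ctx context.Context, value interface{}) *gqlerrors.QueryError {
	return gqlerrors.Errorf("internal server error")
}

type query struct{}

func (*query) Hello() string { return "Hello, world!" }

func main() {
	schema := graphql.MustParseSchema(
		`type Query { hello: String! }`,
		&query{},
		graphql.PanicHandler(redactingPanicHandler{}),
	)
	_ = schema
}
```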
diff --git a/vendor/github.com/graph-gophers/graphql-go/graphql.go b/vendor/github.com/graph-gophers/graphql-go/graphql.go
deleted file mode 100644
index 76a6434d..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/graphql.go
+++ /dev/null
@@ -1,339 +0,0 @@
-package graphql
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "time"
-
- "github.com/graph-gophers/graphql-go/errors"
- "github.com/graph-gophers/graphql-go/internal/common"
- "github.com/graph-gophers/graphql-go/internal/exec"
- "github.com/graph-gophers/graphql-go/internal/exec/resolvable"
- "github.com/graph-gophers/graphql-go/internal/exec/selected"
- "github.com/graph-gophers/graphql-go/internal/query"
- "github.com/graph-gophers/graphql-go/internal/schema"
- "github.com/graph-gophers/graphql-go/internal/validation"
- "github.com/graph-gophers/graphql-go/introspection"
- "github.com/graph-gophers/graphql-go/log"
- "github.com/graph-gophers/graphql-go/trace"
- "github.com/graph-gophers/graphql-go/types"
-)
-
-// ParseSchema parses a GraphQL schema and attaches the given root resolver. It returns an error if
-// the Go type signature of the resolvers does not match the schema. If nil is passed as the
-// resolver, then the schema can not be executed, but it may be inspected (e.g. with ToJSON).
-func ParseSchema(schemaString string, resolver interface{}, opts ...SchemaOpt) (*Schema, error) {
- s := &Schema{
- schema: schema.New(),
- maxParallelism: 10,
- tracer: trace.OpenTracingTracer{},
- logger: &log.DefaultLogger{},
- panicHandler: &errors.DefaultPanicHandler{},
- }
- for _, opt := range opts {
- opt(s)
- }
-
- if s.validationTracer == nil {
- if tracer, ok := s.tracer.(trace.ValidationTracerContext); ok {
- s.validationTracer = tracer
- } else {
- s.validationTracer = &validationBridgingTracer{tracer: trace.NoopValidationTracer{}}
- }
- }
-
- if err := schema.Parse(s.schema, schemaString, s.useStringDescriptions); err != nil {
- return nil, err
- }
- if err := s.validateSchema(); err != nil {
- return nil, err
- }
-
- r, err := resolvable.ApplyResolver(s.schema, resolver)
- if err != nil {
- return nil, err
- }
- s.res = r
-
- return s, nil
-}
-
-// MustParseSchema calls ParseSchema and panics on error.
-func MustParseSchema(schemaString string, resolver interface{}, opts ...SchemaOpt) *Schema {
- s, err := ParseSchema(schemaString, resolver, opts...)
- if err != nil {
- panic(err)
- }
- return s
-}
-
-// Schema represents a GraphQL schema with an optional resolver.
-type Schema struct {
- schema *types.Schema
- res *resolvable.Schema
-
- maxDepth int
- maxParallelism int
- tracer trace.Tracer
- validationTracer trace.ValidationTracerContext
- logger log.Logger
- panicHandler errors.PanicHandler
- useStringDescriptions bool
- disableIntrospection bool
- subscribeResolverTimeout time.Duration
-}
-
-func (s *Schema) ASTSchema() *types.Schema {
- return s.schema
-}
-
-// SchemaOpt is an option to pass to ParseSchema or MustParseSchema.
-type SchemaOpt func(*Schema)
-
-// UseStringDescriptions enables the usage of double quoted and triple quoted
-// strings as descriptions as per the June 2018 spec
-// https://facebook.github.io/graphql/June2018/. When this is not enabled,
-// comments are parsed as descriptions instead.
-func UseStringDescriptions() SchemaOpt {
- return func(s *Schema) {
- s.useStringDescriptions = true
- }
-}
-
-// UseFieldResolvers specifies whether to use struct field resolvers
-func UseFieldResolvers() SchemaOpt {
- return func(s *Schema) {
- s.schema.UseFieldResolvers = true
- }
-}
-
-// MaxDepth specifies the maximum field nesting depth in a query. The default is 0 which disables max depth checking.
-func MaxDepth(n int) SchemaOpt {
- return func(s *Schema) {
- s.maxDepth = n
- }
-}
-
-// MaxParallelism specifies the maximum number of resolvers per request allowed to run in parallel. The default is 10.
-func MaxParallelism(n int) SchemaOpt {
- return func(s *Schema) {
- s.maxParallelism = n
- }
-}
-
-// Tracer is used to trace queries and fields. It defaults to trace.OpenTracingTracer.
-func Tracer(tracer trace.Tracer) SchemaOpt {
- return func(s *Schema) {
- s.tracer = tracer
- }
-}
-
-// ValidationTracer is used to trace validation errors. It defaults to trace.NoopValidationTracer.
-// Deprecated: context is needed to support tracing correctly. Use a Tracer which implements trace.ValidationTracerContext.
-func ValidationTracer(tracer trace.ValidationTracer) SchemaOpt { //nolint:staticcheck
- return func(s *Schema) {
- s.validationTracer = &validationBridgingTracer{tracer: tracer}
- }
-}
-
-// Logger is used to log panics during query execution. It defaults to exec.DefaultLogger.
-func Logger(logger log.Logger) SchemaOpt {
- return func(s *Schema) {
- s.logger = logger
- }
-}
-
-// PanicHandler is used to customize the panic errors during query execution.
-// It defaults to errors.DefaultPanicHandler.
-func PanicHandler(panicHandler errors.PanicHandler) SchemaOpt {
- return func(s *Schema) {
- s.panicHandler = panicHandler
- }
-}
-
-// DisableIntrospection disables introspection queries.
-func DisableIntrospection() SchemaOpt {
- return func(s *Schema) {
- s.disableIntrospection = true
- }
-}
-
-// SubscribeResolverTimeout is an option to control the amount of time
-// we allow for a single subscribe message resolver to complete its job
-// before it times out and returns an error to the subscriber.
-func SubscribeResolverTimeout(timeout time.Duration) SchemaOpt {
- return func(s *Schema) {
- s.subscribeResolverTimeout = timeout
- }
-}
-
-// Response represents a typical response of a GraphQL server. It may be encoded to JSON directly or
-// it may be further processed to a custom response type, for example to include custom error data.
-// Errors are intentionally serialized first based on the advice in https://github.com/facebook/graphql/commit/7b40390d48680b15cb93e02d46ac5eb249689876#diff-757cea6edf0288677a9eea4cfc801d87R107
-type Response struct {
- Errors []*errors.QueryError `json:"errors,omitempty"`
- Data json.RawMessage `json:"data,omitempty"`
- Extensions map[string]interface{} `json:"extensions,omitempty"`
-}
-
-// Validate validates the given query with the schema.
-func (s *Schema) Validate(queryString string) []*errors.QueryError {
- return s.ValidateWithVariables(queryString, nil)
-}
-
-// ValidateWithVariables validates the given query with the schema and the input variables.
-func (s *Schema) ValidateWithVariables(queryString string, variables map[string]interface{}) []*errors.QueryError {
- doc, qErr := query.Parse(queryString)
- if qErr != nil {
- return []*errors.QueryError{qErr}
- }
-
- return validation.Validate(s.schema, doc, variables, s.maxDepth)
-}
-
-// Exec executes the given query with the schema's resolver. It panics if the schema was created
-// without a resolver. If the context gets cancelled, no further resolvers will be called and
-// the context error will be returned as soon as possible (not immediately).
-func (s *Schema) Exec(ctx context.Context, queryString string, operationName string, variables map[string]interface{}) *Response {
- if !s.res.Resolver.IsValid() {
- panic("schema created without resolver, can not exec")
- }
- return s.exec(ctx, queryString, operationName, variables, s.res)
-}
-
-func (s *Schema) exec(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, res *resolvable.Schema) *Response {
- doc, qErr := query.Parse(queryString)
- if qErr != nil {
- return &Response{Errors: []*errors.QueryError{qErr}}
- }
-
- validationFinish := s.validationTracer.TraceValidation(ctx)
- errs := validation.Validate(s.schema, doc, variables, s.maxDepth)
- validationFinish(errs)
- if len(errs) != 0 {
- return &Response{Errors: errs}
- }
-
- op, err := getOperation(doc, operationName)
- if err != nil {
- return &Response{Errors: []*errors.QueryError{errors.Errorf("%s", err)}}
- }
-
- // If the optional "operationName" POST parameter is not provided then
- // use the query's operation name for improved tracing.
- if operationName == "" {
- operationName = op.Name.Name
- }
-
- // Subscriptions are not valid in Exec. Use schema.Subscribe() instead.
- if op.Type == query.Subscription {
- return &Response{Errors: []*errors.QueryError{{Message: "graphql-ws protocol header is missing"}}}
- }
- if op.Type == query.Mutation {
- if _, ok := s.schema.EntryPoints["mutation"]; !ok {
- return &Response{Errors: []*errors.QueryError{{Message: "no mutations are offered by the schema"}}}
- }
- }
-
- // Fill in variables with the defaults from the operation
- if variables == nil {
- variables = make(map[string]interface{}, len(op.Vars))
- }
- for _, v := range op.Vars {
- if _, ok := variables[v.Name.Name]; !ok && v.Default != nil {
- variables[v.Name.Name] = v.Default.Deserialize(nil)
- }
- }
-
- r := &exec.Request{
- Request: selected.Request{
- Doc: doc,
- Vars: variables,
- Schema: s.schema,
- DisableIntrospection: s.disableIntrospection,
- },
- Limiter: make(chan struct{}, s.maxParallelism),
- Tracer: s.tracer,
- Logger: s.logger,
- PanicHandler: s.panicHandler,
- }
- varTypes := make(map[string]*introspection.Type)
- for _, v := range op.Vars {
- t, err := common.ResolveType(v.Type, s.schema.Resolve)
- if err != nil {
- return &Response{Errors: []*errors.QueryError{err}}
- }
- varTypes[v.Name.Name] = introspection.WrapType(t)
- }
- traceCtx, finish := s.tracer.TraceQuery(ctx, queryString, operationName, variables, varTypes)
- data, errs := r.Execute(traceCtx, res, op)
- finish(errs)
-
- return &Response{
- Data: data,
- Errors: errs,
- }
-}
-
-func (s *Schema) validateSchema() error {
- // https://graphql.github.io/graphql-spec/June2018/#sec-Root-Operation-Types
- // > The query root operation type must be provided and must be an Object type.
- if err := validateRootOp(s.schema, "query", true); err != nil {
- return err
- }
- // > The mutation root operation type is optional; if it is not provided, the service does not support mutations.
- // > If it is provided, it must be an Object type.
- if err := validateRootOp(s.schema, "mutation", false); err != nil {
- return err
- }
- // > Similarly, the subscription root operation type is also optional; if it is not provided, the service does not
- // > support subscriptions. If it is provided, it must be an Object type.
- if err := validateRootOp(s.schema, "subscription", false); err != nil {
- return err
- }
- return nil
-}
-
-type validationBridgingTracer struct {
- tracer trace.ValidationTracer //nolint:staticcheck
-}
-
-func (t *validationBridgingTracer) TraceValidation(context.Context) trace.TraceValidationFinishFunc {
- return t.tracer.TraceValidation()
-}
-
-func validateRootOp(s *types.Schema, name string, mandatory bool) error {
- t, ok := s.EntryPoints[name]
- if !ok {
- if mandatory {
- return fmt.Errorf("root operation %q must be defined", name)
- }
- return nil
- }
- if t.Kind() != "OBJECT" {
- return fmt.Errorf("root operation %q must be an OBJECT", name)
- }
- return nil
-}
-
-func getOperation(document *types.ExecutableDefinition, operationName string) (*types.OperationDefinition, error) {
- if len(document.Operations) == 0 {
- return nil, fmt.Errorf("no operations in query document")
- }
-
- if operationName == "" {
- if len(document.Operations) > 1 {
- return nil, fmt.Errorf("more than one operation in query document and no operation name given")
- }
- for _, op := range document.Operations {
- return op, nil // return the one and only operation
- }
- }
-
- op := document.Operations.Get(operationName)
- if op == nil {
- return nil, fmt.Errorf("no operation with name %q", operationName)
- }
- return op, nil
-}
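Editorial note: combining the SchemaOpt functions and Exec removed above, a minimal direct-execution sketch (the schema, resolver, and limit values are illustrative):

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"

	graphql "github.com/graph-gophers/graphql-go"
)

type query struct{}

func (*query) Ping() string { return "pong" }

func main() {
	schema := graphql.MustParseSchema(
		`type Query { ping: String! }`,
		&query{},
		graphql.MaxDepth(10),        // illustrative limits
		graphql.MaxParallelism(20),
		graphql.UseStringDescriptions(),
	)

	// Exec runs the query against the attached resolver and returns a Response
	// ready to be JSON-encoded.
	resp := schema.Exec(context.Background(), `{ ping }`, "", nil)
	out, _ := json.Marshal(resp)
	fmt.Println(string(out)) // {"data":{"ping":"pong"}}
}
```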
diff --git a/vendor/github.com/graph-gophers/graphql-go/id.go b/vendor/github.com/graph-gophers/graphql-go/id.go
deleted file mode 100644
index 80bdac90..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/id.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package graphql
-
-import (
- "fmt"
- "strconv"
-)
-
-// ID represents GraphQL's "ID" scalar type. A custom type may be used instead.
-type ID string
-
-func (ID) ImplementsGraphQLType(name string) bool {
- return name == "ID"
-}
-
-func (id *ID) UnmarshalGraphQL(input interface{}) error {
- var err error
- switch input := input.(type) {
- case string:
- *id = ID(input)
- case int32:
- *id = ID(strconv.Itoa(int(input)))
- default:
- err = fmt.Errorf("wrong type for ID: %T", input)
- }
- return err
-}
-
-func (id ID) MarshalJSON() ([]byte, error) {
- return strconv.AppendQuote(nil, string(id)), nil
-}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/blockstring.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/blockstring.go
deleted file mode 100644
index 1f7fe813..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/internal/common/blockstring.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// MIT License
-//
-// Copyright (c) 2019 GraphQL Contributors
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-//
-// This implementation has been adapted from the graphql-js reference implementation
-// https://github.com/graphql/graphql-js/blob/5eb7c4ded7ceb83ac742149cbe0dae07a8af9a30/src/language/blockString.js
-// which is released under the MIT License above.
-
-package common
-
-import (
- "strings"
-)
-
-// Produces the value of a block string from its parsed raw value, similar to
-// CoffeeScript's block string, Python's docstring trim or Ruby's strip_heredoc.
-//
-// This implements the GraphQL spec's BlockStringValue() static algorithm.
-func blockString(raw string) string {
- lines := strings.Split(raw, "\n")
-
- // Remove common indentation from all lines except the first (which has none)
- ind := blockStringIndentation(lines)
- if ind > 0 {
- for i := 1; i < len(lines); i++ {
- l := lines[i]
- if len(l) < ind {
- lines[i] = ""
- continue
- }
- lines[i] = l[ind:]
- }
- }
-
- // Remove leading and trailing blank lines
- trimStart := 0
- for i := 0; i < len(lines) && isBlank(lines[i]); i++ {
- trimStart++
- }
- lines = lines[trimStart:]
- trimEnd := 0
- for i := len(lines) - 1; i > 0 && isBlank(lines[i]); i-- {
- trimEnd++
- }
- lines = lines[:len(lines)-trimEnd]
-
- return strings.Join(lines, "\n")
-}
-
-func blockStringIndentation(lines []string) int {
- var commonIndent *int
- for i := 1; i < len(lines); i++ {
- l := lines[i]
- indent := leadingWhitespace(l)
- if indent == len(l) {
- // don't consider blank/empty lines
- continue
- }
- if indent == 0 {
- return 0
- }
- if commonIndent == nil || indent < *commonIndent {
- commonIndent = &indent
- }
- }
- if commonIndent == nil {
- return 0
- }
- return *commonIndent
-}
-
-func isBlank(s string) bool {
- return len(s) == 0 || leadingWhitespace(s) == len(s)
-}
-
-func leadingWhitespace(s string) int {
- i := 0
- for _, r := range s {
- if r != '\t' && r != ' ' {
- break
- }
- i++
- }
- return i
-}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/directive.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/directive.go
deleted file mode 100644
index f767e28f..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/internal/common/directive.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package common
-
-import "github.com/graph-gophers/graphql-go/types"
-
-func ParseDirectives(l *Lexer) types.DirectiveList {
- var directives types.DirectiveList
- for l.Peek() == '@' {
- l.ConsumeToken('@')
- d := &types.Directive{}
- d.Name = l.ConsumeIdentWithLoc()
- d.Name.Loc.Column--
- if l.Peek() == '(' {
- d.Arguments = ParseArgumentList(l)
- }
- directives = append(directives, d)
- }
- return directives
-}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/lexer.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/lexer.go
deleted file mode 100644
index ff45bcad..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/internal/common/lexer.go
+++ /dev/null
@@ -1,229 +0,0 @@
-package common
-
-import (
- "bytes"
- "fmt"
- "strconv"
- "strings"
- "text/scanner"
-
- "github.com/graph-gophers/graphql-go/errors"
- "github.com/graph-gophers/graphql-go/types"
-)
-
-type syntaxError string
-
-type Lexer struct {
- sc *scanner.Scanner
- next rune
- comment bytes.Buffer
- useStringDescriptions bool
-}
-
-type Ident struct {
- Name string
- Loc errors.Location
-}
-
-func NewLexer(s string, useStringDescriptions bool) *Lexer {
- sc := &scanner.Scanner{
- Mode: scanner.ScanIdents | scanner.ScanInts | scanner.ScanFloats | scanner.ScanStrings,
- }
- sc.Init(strings.NewReader(s))
-
- l := Lexer{sc: sc, useStringDescriptions: useStringDescriptions}
- l.sc.Error = l.CatchScannerError
-
- return &l
-}
-
-func (l *Lexer) CatchSyntaxError(f func()) (errRes *errors.QueryError) {
- defer func() {
- if err := recover(); err != nil {
- if err, ok := err.(syntaxError); ok {
- errRes = errors.Errorf("syntax error: %s", err)
- errRes.Locations = []errors.Location{l.Location()}
- return
- }
- panic(err)
- }
- }()
-
- f()
- return
-}
-
-func (l *Lexer) Peek() rune {
- return l.next
-}
-
-// ConsumeWhitespace consumes whitespace and tokens equivalent to whitespace (e.g. commas and comments).
-//
-// Consumed comment characters will build the description for the next type or field encountered.
-// The description is available from `DescComment()`, and will be reset every time `ConsumeWhitespace()` is
-// executed unless l.useStringDescriptions is set.
-func (l *Lexer) ConsumeWhitespace() {
- l.comment.Reset()
- for {
- l.next = l.sc.Scan()
-
- if l.next == ',' {
- // Similar to white space and line terminators, commas (',') are used to improve the
- // legibility of source text and separate lexical tokens but are otherwise syntactically and
- // semantically insignificant within GraphQL documents.
- //
- // http://facebook.github.io/graphql/draft/#sec-Insignificant-Commas
- continue
- }
-
- if l.next == '#' {
- // GraphQL source documents may contain single-line comments, starting with the '#' marker.
- //
- // A comment can contain any Unicode code point except `LineTerminator` so a comment always
- // consists of all code points starting with the '#' character up to but not including the
- // line terminator.
- l.consumeComment()
- continue
- }
-
- break
- }
-}
-
-// consumeDescription optionally consumes a description based on the June 2018 graphql spec if any are present.
-//
-// Single quote strings are also single line. Triple quote strings can be multi-line. Triple quote strings
-// are whitespace trimmed on both ends.
-// If a description is found, consume any following comments as well
-//
-// http://facebook.github.io/graphql/June2018/#sec-Descriptions
-func (l *Lexer) consumeDescription() string {
- // If the next token is not a string, we don't consume it
- if l.next != scanner.String {
- return ""
- }
- // Triple quote string is an empty "string" followed by an open quote due to the way the parser treats strings as one token
- var desc string
- if l.sc.Peek() == '"' {
- desc = l.consumeTripleQuoteComment()
- } else {
- desc = l.consumeStringComment()
- }
- l.ConsumeWhitespace()
- return desc
-}
-
-func (l *Lexer) ConsumeIdent() string {
- name := l.sc.TokenText()
- l.ConsumeToken(scanner.Ident)
- return name
-}
-
-func (l *Lexer) ConsumeIdentWithLoc() types.Ident {
- loc := l.Location()
- name := l.sc.TokenText()
- l.ConsumeToken(scanner.Ident)
- return types.Ident{Name: name, Loc: loc}
-}
-
-func (l *Lexer) ConsumeKeyword(keyword string) {
- if l.next != scanner.Ident || l.sc.TokenText() != keyword {
- l.SyntaxError(fmt.Sprintf("unexpected %q, expecting %q", l.sc.TokenText(), keyword))
- }
- l.ConsumeWhitespace()
-}
-
-func (l *Lexer) ConsumeLiteral() *types.PrimitiveValue {
- lit := &types.PrimitiveValue{Type: l.next, Text: l.sc.TokenText()}
- l.ConsumeWhitespace()
- return lit
-}
-
-func (l *Lexer) ConsumeToken(expected rune) {
- if l.next != expected {
- l.SyntaxError(fmt.Sprintf("unexpected %q, expecting %s", l.sc.TokenText(), scanner.TokenString(expected)))
- }
- l.ConsumeWhitespace()
-}
-
-func (l *Lexer) DescComment() string {
- comment := l.comment.String()
- desc := l.consumeDescription()
- if l.useStringDescriptions {
- return desc
- }
- return comment
-}
-
-func (l *Lexer) SyntaxError(message string) {
- panic(syntaxError(message))
-}
-
-func (l *Lexer) Location() errors.Location {
- return errors.Location{
- Line: l.sc.Line,
- Column: l.sc.Column,
- }
-}
-
-func (l *Lexer) consumeTripleQuoteComment() string {
- l.next = l.sc.Next()
- if l.next != '"' {
- panic("consumeTripleQuoteComment used in wrong context: no third quote?")
- }
-
- var buf bytes.Buffer
- var numQuotes int
- for {
- l.next = l.sc.Next()
- if l.next == '"' {
- numQuotes++
- } else {
- numQuotes = 0
- }
- buf.WriteRune(l.next)
- if numQuotes == 3 || l.next == scanner.EOF {
- break
- }
- }
- val := buf.String()
- val = val[:len(val)-numQuotes]
- return blockString(val)
-}
-
-func (l *Lexer) consumeStringComment() string {
- val, err := strconv.Unquote(l.sc.TokenText())
- if err != nil {
- panic(err)
- }
- return val
-}
-
-// consumeComment consumes all characters from `#` to the first encountered line terminator.
-// The characters are appended to `l.comment`.
-func (l *Lexer) consumeComment() {
- if l.next != '#' {
- panic("consumeComment used in wrong context")
- }
-
- // TODO: count and trim whitespace so we can dedent any following lines.
- if l.sc.Peek() == ' ' {
- l.sc.Next()
- }
-
- if l.comment.Len() > 0 {
- l.comment.WriteRune('\n')
- }
-
- for {
- next := l.sc.Next()
- if next == '\r' || next == '\n' || next == scanner.EOF {
- break
- }
- l.comment.WriteRune(next)
- }
-}
-
-func (l *Lexer) CatchScannerError(s *scanner.Scanner, msg string) {
- l.SyntaxError(msg)
-}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/literals.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/literals.go
deleted file mode 100644
index a6af3c43..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/internal/common/literals.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package common
-
-import (
- "text/scanner"
-
- "github.com/graph-gophers/graphql-go/types"
-)
-
-func ParseLiteral(l *Lexer, constOnly bool) types.Value {
- loc := l.Location()
- switch l.Peek() {
- case '$':
- if constOnly {
- l.SyntaxError("variable not allowed")
- panic("unreachable")
- }
- l.ConsumeToken('$')
- return &types.Variable{Name: l.ConsumeIdent(), Loc: loc}
-
- case scanner.Int, scanner.Float, scanner.String, scanner.Ident:
- lit := l.ConsumeLiteral()
- if lit.Type == scanner.Ident && lit.Text == "null" {
- return &types.NullValue{Loc: loc}
- }
- lit.Loc = loc
- return lit
- case '-':
- l.ConsumeToken('-')
- lit := l.ConsumeLiteral()
- lit.Text = "-" + lit.Text
- lit.Loc = loc
- return lit
- case '[':
- l.ConsumeToken('[')
- var list []types.Value
- for l.Peek() != ']' {
- list = append(list, ParseLiteral(l, constOnly))
- }
- l.ConsumeToken(']')
- return &types.ListValue{Values: list, Loc: loc}
-
- case '{':
- l.ConsumeToken('{')
- var fields []*types.ObjectField
- for l.Peek() != '}' {
- name := l.ConsumeIdentWithLoc()
- l.ConsumeToken(':')
- value := ParseLiteral(l, constOnly)
- fields = append(fields, &types.ObjectField{Name: name, Value: value})
- }
- l.ConsumeToken('}')
- return &types.ObjectValue{Fields: fields, Loc: loc}
-
- default:
- l.SyntaxError("invalid value")
- panic("unreachable")
- }
-}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/types.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/types.go
deleted file mode 100644
index 4a30f46e..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/internal/common/types.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package common
-
-import (
- "github.com/graph-gophers/graphql-go/errors"
- "github.com/graph-gophers/graphql-go/types"
-)
-
-func ParseType(l *Lexer) types.Type {
- t := parseNullType(l)
- if l.Peek() == '!' {
- l.ConsumeToken('!')
- return &types.NonNull{OfType: t}
- }
- return t
-}
-
-func parseNullType(l *Lexer) types.Type {
- if l.Peek() == '[' {
- l.ConsumeToken('[')
- ofType := ParseType(l)
- l.ConsumeToken(']')
- return &types.List{OfType: ofType}
- }
-
- return &types.TypeName{Ident: l.ConsumeIdentWithLoc()}
-}
-
-type Resolver func(name string) types.Type
-
-// ResolveType attempts to resolve a type's name against a resolving function.
-// This function is used when one needs to check if a TypeName exists in the resolver (typically a Schema).
-//
-// In the example below, ResolveType would be used to check if the resolving function
-// returns a valid type for Dimension:
-//
-// type Profile {
-// picture(dimensions: Dimension): Url
-// }
-//
-// ResolveType recursively unwraps List and NonNull types until a NamedType is reached.
-func ResolveType(t types.Type, resolver Resolver) (types.Type, *errors.QueryError) {
- switch t := t.(type) {
- case *types.List:
- ofType, err := ResolveType(t.OfType, resolver)
- if err != nil {
- return nil, err
- }
- return &types.List{OfType: ofType}, nil
- case *types.NonNull:
- ofType, err := ResolveType(t.OfType, resolver)
- if err != nil {
- return nil, err
- }
- return &types.NonNull{OfType: ofType}, nil
- case *types.TypeName:
- refT := resolver(t.Name)
- if refT == nil {
- err := errors.Errorf("Unknown type %q.", t.Name)
- err.Rule = "KnownTypeNames"
- err.Locations = []errors.Location{t.Loc}
- return nil, err
- }
- return refT, nil
- default:
- return t, nil
- }
-}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/values.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/values.go
deleted file mode 100644
index 2d6e0b54..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/internal/common/values.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package common
-
-import (
- "github.com/graph-gophers/graphql-go/types"
-)
-
-func ParseInputValue(l *Lexer) *types.InputValueDefinition {
- p := &types.InputValueDefinition{}
- p.Loc = l.Location()
- p.Desc = l.DescComment()
- p.Name = l.ConsumeIdentWithLoc()
- l.ConsumeToken(':')
- p.TypeLoc = l.Location()
- p.Type = ParseType(l)
- if l.Peek() == '=' {
- l.ConsumeToken('=')
- p.Default = ParseLiteral(l, true)
- }
- p.Directives = ParseDirectives(l)
- return p
-}
-
-func ParseArgumentList(l *Lexer) types.ArgumentList {
- var args types.ArgumentList
- l.ConsumeToken('(')
- for l.Peek() != ')' {
- name := l.ConsumeIdentWithLoc()
- l.ConsumeToken(':')
- value := ParseLiteral(l, false)
- args = append(args, &types.Argument{
- Name: name,
- Value: value,
- })
- }
- l.ConsumeToken(')')
- return args
-}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/exec.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/exec.go
deleted file mode 100644
index 6b478487..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/internal/exec/exec.go
+++ /dev/null
@@ -1,381 +0,0 @@
-package exec
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "reflect"
- "sync"
- "time"
-
- "github.com/graph-gophers/graphql-go/errors"
- "github.com/graph-gophers/graphql-go/internal/exec/resolvable"
- "github.com/graph-gophers/graphql-go/internal/exec/selected"
- "github.com/graph-gophers/graphql-go/internal/query"
- "github.com/graph-gophers/graphql-go/log"
- "github.com/graph-gophers/graphql-go/trace"
- "github.com/graph-gophers/graphql-go/types"
-)
-
-type Request struct {
- selected.Request
- Limiter chan struct{}
- Tracer trace.Tracer
- Logger log.Logger
- PanicHandler errors.PanicHandler
- SubscribeResolverTimeout time.Duration
-}
-
-func (r *Request) handlePanic(ctx context.Context) {
- if value := recover(); value != nil {
- r.Logger.LogPanic(ctx, value)
- r.AddError(r.PanicHandler.MakePanicError(ctx, value))
- }
-}
-
-type extensionser interface {
- Extensions() map[string]interface{}
-}
-
-func (r *Request) Execute(ctx context.Context, s *resolvable.Schema, op *types.OperationDefinition) ([]byte, []*errors.QueryError) {
- var out bytes.Buffer
- func() {
- defer r.handlePanic(ctx)
- sels := selected.ApplyOperation(&r.Request, s, op)
- r.execSelections(ctx, sels, nil, s, s.Resolver, &out, op.Type == query.Mutation)
- }()
-
- if err := ctx.Err(); err != nil {
- return nil, []*errors.QueryError{errors.Errorf("%s", err)}
- }
-
- return out.Bytes(), r.Errs
-}
-
-type fieldToExec struct {
- field *selected.SchemaField
- sels []selected.Selection
- resolver reflect.Value
- out *bytes.Buffer
-}
-
-func resolvedToNull(b *bytes.Buffer) bool {
- return bytes.Equal(b.Bytes(), []byte("null"))
-}
-
-func (r *Request) execSelections(ctx context.Context, sels []selected.Selection, path *pathSegment, s *resolvable.Schema, resolver reflect.Value, out *bytes.Buffer, serially bool) {
- async := !serially && selected.HasAsyncSel(sels)
-
- var fields []*fieldToExec
- collectFieldsToResolve(sels, s, resolver, &fields, make(map[string]*fieldToExec))
-
- if async {
- var wg sync.WaitGroup
- wg.Add(len(fields))
- for _, f := range fields {
- go func(f *fieldToExec) {
- defer wg.Done()
- defer r.handlePanic(ctx)
- f.out = new(bytes.Buffer)
- execFieldSelection(ctx, r, s, f, &pathSegment{path, f.field.Alias}, true)
- }(f)
- }
- wg.Wait()
- } else {
- for _, f := range fields {
- f.out = new(bytes.Buffer)
- execFieldSelection(ctx, r, s, f, &pathSegment{path, f.field.Alias}, true)
- }
- }
-
- out.WriteByte('{')
- for i, f := range fields {
- // If a non-nullable child resolved to null, an error was added to the
- // "errors" list in the response, so this field resolves to null.
- // If this field is non-nullable, the error is propagated to its parent.
- if _, ok := f.field.Type.(*types.NonNull); ok && resolvedToNull(f.out) {
- out.Reset()
- out.Write([]byte("null"))
- return
- }
-
- if i > 0 {
- out.WriteByte(',')
- }
- out.WriteByte('"')
- out.WriteString(f.field.Alias)
- out.WriteByte('"')
- out.WriteByte(':')
- out.Write(f.out.Bytes())
- }
- out.WriteByte('}')
-}
-
-func collectFieldsToResolve(sels []selected.Selection, s *resolvable.Schema, resolver reflect.Value, fields *[]*fieldToExec, fieldByAlias map[string]*fieldToExec) {
- for _, sel := range sels {
- switch sel := sel.(type) {
- case *selected.SchemaField:
- field, ok := fieldByAlias[sel.Alias]
- if !ok { // validation already checked for conflict (TODO)
- field = &fieldToExec{field: sel, resolver: resolver}
- fieldByAlias[sel.Alias] = field
- *fields = append(*fields, field)
- }
- field.sels = append(field.sels, sel.Sels...)
-
- case *selected.TypenameField:
- _, ok := fieldByAlias[sel.Alias]
- if !ok {
- res := reflect.ValueOf(typeOf(sel, resolver))
- f := s.FieldTypename
- f.TypeName = res.String()
-
- sf := &selected.SchemaField{
- Field: f,
- Alias: sel.Alias,
- FixedResult: res,
- }
-
- field := &fieldToExec{field: sf, resolver: resolver}
- *fields = append(*fields, field)
- fieldByAlias[sel.Alias] = field
- }
-
- case *selected.TypeAssertion:
- out := resolver.Method(sel.MethodIndex).Call(nil)
- if !out[1].Bool() {
- continue
- }
- collectFieldsToResolve(sel.Sels, s, out[0], fields, fieldByAlias)
-
- default:
- panic("unreachable")
- }
- }
-}
-
-func typeOf(tf *selected.TypenameField, resolver reflect.Value) string {
- if len(tf.TypeAssertions) == 0 {
- return tf.Name
- }
- for name, a := range tf.TypeAssertions {
- out := resolver.Method(a.MethodIndex).Call(nil)
- if out[1].Bool() {
- return name
- }
- }
- return ""
-}
-
-func execFieldSelection(ctx context.Context, r *Request, s *resolvable.Schema, f *fieldToExec, path *pathSegment, applyLimiter bool) {
- if applyLimiter {
- r.Limiter <- struct{}{}
- }
-
- var result reflect.Value
- var err *errors.QueryError
-
- traceCtx, finish := r.Tracer.TraceField(ctx, f.field.TraceLabel, f.field.TypeName, f.field.Name, !f.field.Async, f.field.Args)
- defer func() {
- finish(err)
- }()
-
- err = func() (err *errors.QueryError) {
- defer func() {
- if panicValue := recover(); panicValue != nil {
- r.Logger.LogPanic(ctx, panicValue)
- err = r.PanicHandler.MakePanicError(ctx, panicValue)
- err.Path = path.toSlice()
- }
- }()
-
- if f.field.FixedResult.IsValid() {
- result = f.field.FixedResult
- return nil
- }
-
- if err := traceCtx.Err(); err != nil {
- return errors.Errorf("%s", err) // don't execute any more resolvers if context got cancelled
- }
-
- res := f.resolver
- if f.field.UseMethodResolver() {
- var in []reflect.Value
- if f.field.HasContext {
- in = append(in, reflect.ValueOf(traceCtx))
- }
- if f.field.ArgsPacker != nil {
- in = append(in, f.field.PackedArgs)
- }
- callOut := res.Method(f.field.MethodIndex).Call(in)
- result = callOut[0]
- if f.field.HasError && !callOut[1].IsNil() {
- resolverErr := callOut[1].Interface().(error)
- err := errors.Errorf("%s", resolverErr)
- err.Path = path.toSlice()
- err.ResolverError = resolverErr
- if ex, ok := callOut[1].Interface().(extensionser); ok {
- err.Extensions = ex.Extensions()
- }
- return err
- }
- } else {
- // TODO extract out unwrapping ptr logic to a common place
- if res.Kind() == reflect.Ptr {
- res = res.Elem()
- }
- result = res.FieldByIndex(f.field.FieldIndex)
- }
- return nil
- }()
-
- if applyLimiter {
- <-r.Limiter
- }
-
- if err != nil {
- // If an error occurred while resolving a field, it should be treated as though the field
- // returned null, and an error must be added to the "errors" list in the response.
- r.AddError(err)
- f.out.WriteString("null")
- return
- }
-
- r.execSelectionSet(traceCtx, f.sels, f.field.Type, path, s, result, f.out)
-}
-
-func (r *Request) execSelectionSet(ctx context.Context, sels []selected.Selection, typ types.Type, path *pathSegment, s *resolvable.Schema, resolver reflect.Value, out *bytes.Buffer) {
- t, nonNull := unwrapNonNull(typ)
-
- // a reflect.Value of a nil interface will show up as an Invalid value
- if resolver.Kind() == reflect.Invalid || ((resolver.Kind() == reflect.Ptr || resolver.Kind() == reflect.Interface) && resolver.IsNil()) {
- // If a field of a non-null type resolves to null (either because the
- // function to resolve the field returned null or because an error occurred),
- // add an error to the "errors" list in the response.
- if nonNull {
- err := errors.Errorf("graphql: got nil for non-null %q", t)
- err.Path = path.toSlice()
- r.AddError(err)
- }
- out.WriteString("null")
- return
- }
-
- switch t.(type) {
- case *types.ObjectTypeDefinition, *types.InterfaceTypeDefinition, *types.Union:
- r.execSelections(ctx, sels, path, s, resolver, out, false)
- return
- }
-
- // Any pointers or interfaces at this point should be non-nil, so we can get the actual value of them
- // for serialization
- if resolver.Kind() == reflect.Ptr || resolver.Kind() == reflect.Interface {
- resolver = resolver.Elem()
- }
-
- switch t := t.(type) {
- case *types.List:
- r.execList(ctx, sels, t, path, s, resolver, out)
-
- case *types.ScalarTypeDefinition:
- v := resolver.Interface()
- data, err := json.Marshal(v)
- if err != nil {
- panic(errors.Errorf("could not marshal %v: %s", v, err))
- }
- out.Write(data)
-
- case *types.EnumTypeDefinition:
- var stringer fmt.Stringer = resolver
- if s, ok := resolver.Interface().(fmt.Stringer); ok {
- stringer = s
- }
- name := stringer.String()
- var valid bool
- for _, v := range t.EnumValuesDefinition {
- if v.EnumValue == name {
- valid = true
- break
- }
- }
- if !valid {
- err := errors.Errorf("Invalid value %s.\nExpected type %s, found %s.", name, t.Name, name)
- err.Path = path.toSlice()
- r.AddError(err)
- out.WriteString("null")
- return
- }
- out.WriteByte('"')
- out.WriteString(name)
- out.WriteByte('"')
-
- default:
- panic("unreachable")
- }
-}
-
-func (r *Request) execList(ctx context.Context, sels []selected.Selection, typ *types.List, path *pathSegment, s *resolvable.Schema, resolver reflect.Value, out *bytes.Buffer) {
- l := resolver.Len()
- entryouts := make([]bytes.Buffer, l)
-
- if selected.HasAsyncSel(sels) {
- // Limit the number of concurrent goroutines spawned as it can lead to large
- // memory spikes for large lists.
- concurrency := cap(r.Limiter)
- sem := make(chan struct{}, concurrency)
- for i := 0; i < l; i++ {
- sem <- struct{}{}
- go func(i int) {
- defer func() { <-sem }()
- defer r.handlePanic(ctx)
- r.execSelectionSet(ctx, sels, typ.OfType, &pathSegment{path, i}, s, resolver.Index(i), &entryouts[i])
- }(i)
- }
- for i := 0; i < concurrency; i++ {
- sem <- struct{}{}
- }
- } else {
- for i := 0; i < l; i++ {
- r.execSelectionSet(ctx, sels, typ.OfType, &pathSegment{path, i}, s, resolver.Index(i), &entryouts[i])
- }
- }
-
- _, listOfNonNull := typ.OfType.(*types.NonNull)
-
- out.WriteByte('[')
- for i, entryout := range entryouts {
- // If the list wraps a non-null type and one of the list elements
- // resolves to null, then the entire list resolves to null.
- if listOfNonNull && resolvedToNull(&entryout) {
- out.Reset()
- out.WriteString("null")
- return
- }
-
- if i > 0 {
- out.WriteByte(',')
- }
- out.Write(entryout.Bytes())
- }
- out.WriteByte(']')
-}
-
-func unwrapNonNull(t types.Type) (types.Type, bool) {
- if nn, ok := t.(*types.NonNull); ok {
- return nn.OfType, true
- }
- return t, false
-}
-
-type pathSegment struct {
- parent *pathSegment
- value interface{}
-}
-
-func (p *pathSegment) toSlice() []interface{} {
- if p == nil {
- return nil
- }
- return append(p.parent.toSlice(), p.value)
-}
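The executor removed above resolves each selected field via reflection, either calling a resolver method (optionally passing a context and packed arguments) or reading a struct field, and throttles asynchronous work through the request limiter. A minimal sketch of the public graphql-go API that exercises this path; the schema string and the `query` resolver type are illustrative, not taken from this repository:

package main

import (
    "context"
    "fmt"

    graphql "github.com/graph-gophers/graphql-go"
)

const schemaSDL = `
    schema { query: Query }
    type Query { hello(name: String!): String! }
`

// query is a hypothetical resolver; Hello becomes the method resolver for the "hello" field.
type query struct{}

func (*query) Hello(ctx context.Context, args struct{ Name string }) string {
    return "Hello, " + args.Name + "!"
}

func main() {
    s := graphql.MustParseSchema(schemaSDL, &query{})
    resp := s.Exec(context.Background(), `{ hello(name: "world") }`, "", nil)
    fmt.Println(string(resp.Data), resp.Errors)
}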
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/packer/packer.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/packer/packer.go
deleted file mode 100644
index c0bb7dc9..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/internal/exec/packer/packer.go
+++ /dev/null
@@ -1,390 +0,0 @@
-package packer
-
-import (
- "fmt"
- "math"
- "reflect"
- "strings"
-
- "github.com/graph-gophers/graphql-go/decode"
- "github.com/graph-gophers/graphql-go/errors"
- "github.com/graph-gophers/graphql-go/types"
-)
-
-type packer interface {
- Pack(value interface{}) (reflect.Value, error)
-}
-
-type Builder struct {
- packerMap map[typePair]*packerMapEntry
- structPackers []*StructPacker
-}
-
-type typePair struct {
- graphQLType types.Type
- resolverType reflect.Type
-}
-
-type packerMapEntry struct {
- packer packer
- targets []*packer
-}
-
-func NewBuilder() *Builder {
- return &Builder{
- packerMap: make(map[typePair]*packerMapEntry),
- }
-}
-
-func (b *Builder) Finish() error {
- for _, entry := range b.packerMap {
- for _, target := range entry.targets {
- *target = entry.packer
- }
- }
-
- for _, p := range b.structPackers {
- p.defaultStruct = reflect.New(p.structType).Elem()
- for _, f := range p.fields {
- if defaultVal := f.field.Default; defaultVal != nil {
- v, err := f.fieldPacker.Pack(defaultVal.Deserialize(nil))
- if err != nil {
- return err
- }
- p.defaultStruct.FieldByIndex(f.fieldIndex).Set(v)
- }
- }
- }
-
- return nil
-}
-
-func (b *Builder) assignPacker(target *packer, schemaType types.Type, reflectType reflect.Type) error {
- k := typePair{schemaType, reflectType}
- ref, ok := b.packerMap[k]
- if !ok {
- ref = &packerMapEntry{}
- b.packerMap[k] = ref
- var err error
- ref.packer, err = b.makePacker(schemaType, reflectType)
- if err != nil {
- return err
- }
- }
- ref.targets = append(ref.targets, target)
- return nil
-}
-
-func (b *Builder) makePacker(schemaType types.Type, reflectType reflect.Type) (packer, error) {
- t, nonNull := unwrapNonNull(schemaType)
- if !nonNull {
- if reflectType.Kind() == reflect.Ptr {
- elemType := reflectType.Elem()
- addPtr := true
- if _, ok := t.(*types.InputObject); ok {
- elemType = reflectType // keep pointer for input objects
- addPtr = false
- }
- elem, err := b.makeNonNullPacker(t, elemType)
- if err != nil {
- return nil, err
- }
- return &nullPacker{
- elemPacker: elem,
- valueType: reflectType,
- addPtr: addPtr,
- }, nil
- } else if isNullable(reflectType) {
- elemType := reflectType
- addPtr := false
- elem, err := b.makeNonNullPacker(t, elemType)
- if err != nil {
- return nil, err
- }
- return &nullPacker{
- elemPacker: elem,
- valueType: reflectType,
- addPtr: addPtr,
- }, nil
- } else {
- return nil, fmt.Errorf("%s is not a pointer or a nullable type", reflectType)
- }
- }
-
- return b.makeNonNullPacker(t, reflectType)
-}
-
-func (b *Builder) makeNonNullPacker(schemaType types.Type, reflectType reflect.Type) (packer, error) {
- if u, ok := reflect.New(reflectType).Interface().(decode.Unmarshaler); ok {
- if !u.ImplementsGraphQLType(schemaType.String()) {
- return nil, fmt.Errorf("can not unmarshal %s into %s", schemaType, reflectType)
- }
- return &unmarshalerPacker{
- ValueType: reflectType,
- }, nil
- }
-
- switch t := schemaType.(type) {
- case *types.ScalarTypeDefinition:
- return &ValuePacker{
- ValueType: reflectType,
- }, nil
-
- case *types.EnumTypeDefinition:
- if reflectType.Kind() != reflect.String {
- return nil, fmt.Errorf("wrong type, expected %s", reflect.String)
- }
- return &ValuePacker{
- ValueType: reflectType,
- }, nil
-
- case *types.InputObject:
- e, err := b.MakeStructPacker(t.Values, reflectType)
- if err != nil {
- return nil, err
- }
- return e, nil
-
- case *types.List:
- if reflectType.Kind() != reflect.Slice {
- return nil, fmt.Errorf("expected slice, got %s", reflectType)
- }
- p := &listPacker{
- sliceType: reflectType,
- }
- if err := b.assignPacker(&p.elem, t.OfType, reflectType.Elem()); err != nil {
- return nil, err
- }
- return p, nil
-
- case *types.ObjectTypeDefinition, *types.InterfaceTypeDefinition, *types.Union:
- return nil, fmt.Errorf("type of kind %s can not be used as input", t.Kind())
-
- default:
- panic("unreachable")
- }
-}
-
-func (b *Builder) MakeStructPacker(values []*types.InputValueDefinition, typ reflect.Type) (*StructPacker, error) {
- structType := typ
- usePtr := false
- if typ.Kind() == reflect.Ptr {
- structType = typ.Elem()
- usePtr = true
- }
- if structType.Kind() != reflect.Struct {
- return nil, fmt.Errorf("expected struct or pointer to struct, got %s (hint: missing `args struct { ... }` wrapper for field arguments?)", typ)
- }
-
- var fields []*structPackerField
- for _, v := range values {
- fe := &structPackerField{field: v}
- fx := func(n string) bool {
- return strings.EqualFold(stripUnderscore(n), stripUnderscore(v.Name.Name))
- }
-
- sf, ok := structType.FieldByNameFunc(fx)
- if !ok {
- return nil, fmt.Errorf("%s does not define field %q (hint: missing `args struct { ... }` wrapper for field arguments, or missing field on input struct)", typ, v.Name.Name)
- }
- if sf.PkgPath != "" {
- return nil, fmt.Errorf("field %q must be exported", sf.Name)
- }
- fe.fieldIndex = sf.Index
-
- ft := v.Type
- if v.Default != nil {
- ft, _ = unwrapNonNull(ft)
- ft = &types.NonNull{OfType: ft}
- }
-
- if err := b.assignPacker(&fe.fieldPacker, ft, sf.Type); err != nil {
- return nil, fmt.Errorf("field %q: %s", sf.Name, err)
- }
-
- fields = append(fields, fe)
- }
-
- p := &StructPacker{
- structType: structType,
- usePtr: usePtr,
- fields: fields,
- }
- b.structPackers = append(b.structPackers, p)
- return p, nil
-}
-
-type StructPacker struct {
- structType reflect.Type
- usePtr bool
- defaultStruct reflect.Value
- fields []*structPackerField
-}
-
-type structPackerField struct {
- field *types.InputValueDefinition
- fieldIndex []int
- fieldPacker packer
-}
-
-func (p *StructPacker) Pack(value interface{}) (reflect.Value, error) {
- if value == nil {
- return reflect.Value{}, errors.Errorf("got null for non-null")
- }
-
- values := value.(map[string]interface{})
- v := reflect.New(p.structType)
- v.Elem().Set(p.defaultStruct)
- for _, f := range p.fields {
- if value, ok := values[f.field.Name.Name]; ok {
- packed, err := f.fieldPacker.Pack(value)
- if err != nil {
- return reflect.Value{}, err
- }
- v.Elem().FieldByIndex(f.fieldIndex).Set(packed)
- }
- }
- if !p.usePtr {
- return v.Elem(), nil
- }
- return v, nil
-}
-
-type listPacker struct {
- sliceType reflect.Type
- elem packer
-}
-
-func (e *listPacker) Pack(value interface{}) (reflect.Value, error) {
- list, ok := value.([]interface{})
- if !ok {
- list = []interface{}{value}
- }
-
- v := reflect.MakeSlice(e.sliceType, len(list), len(list))
- for i := range list {
- packed, err := e.elem.Pack(list[i])
- if err != nil {
- return reflect.Value{}, err
- }
- v.Index(i).Set(packed)
- }
- return v, nil
-}
-
-type nullPacker struct {
- elemPacker packer
- valueType reflect.Type
- addPtr bool
-}
-
-func (p *nullPacker) Pack(value interface{}) (reflect.Value, error) {
- if value == nil && !isNullable(p.valueType) {
- return reflect.Zero(p.valueType), nil
- }
-
- v, err := p.elemPacker.Pack(value)
- if err != nil {
- return reflect.Value{}, err
- }
-
- if p.addPtr {
- ptr := reflect.New(p.valueType.Elem())
- ptr.Elem().Set(v)
- return ptr, nil
- }
-
- return v, nil
-}
-
-type ValuePacker struct {
- ValueType reflect.Type
-}
-
-func (p *ValuePacker) Pack(value interface{}) (reflect.Value, error) {
- if value == nil {
- return reflect.Value{}, errors.Errorf("got null for non-null")
- }
-
- coerced, err := unmarshalInput(p.ValueType, value)
- if err != nil {
- return reflect.Value{}, fmt.Errorf("could not unmarshal %#v (%T) into %s: %s", value, value, p.ValueType, err)
- }
- return reflect.ValueOf(coerced), nil
-}
-
-type unmarshalerPacker struct {
- ValueType reflect.Type
-}
-
-func (p *unmarshalerPacker) Pack(value interface{}) (reflect.Value, error) {
- if value == nil && !isNullable(p.ValueType) {
- return reflect.Value{}, errors.Errorf("got null for non-null")
- }
-
- v := reflect.New(p.ValueType)
- if err := v.Interface().(decode.Unmarshaler).UnmarshalGraphQL(value); err != nil {
- return reflect.Value{}, err
- }
- return v.Elem(), nil
-}
-
-func unmarshalInput(typ reflect.Type, input interface{}) (interface{}, error) {
- if reflect.TypeOf(input) == typ {
- return input, nil
- }
-
- switch typ.Kind() {
- case reflect.Int32:
- switch input := input.(type) {
- case int:
- if input < math.MinInt32 || input > math.MaxInt32 {
- return nil, fmt.Errorf("not a 32-bit integer")
- }
- return int32(input), nil
- case float64:
- coerced := int32(input)
- if input < math.MinInt32 || input > math.MaxInt32 || float64(coerced) != input {
- return nil, fmt.Errorf("not a 32-bit integer")
- }
- return coerced, nil
- }
-
- case reflect.Float64:
- switch input := input.(type) {
- case int32:
- return float64(input), nil
- case int:
- return float64(input), nil
- }
-
- case reflect.String:
- if reflect.TypeOf(input).ConvertibleTo(typ) {
- return reflect.ValueOf(input).Convert(typ).Interface(), nil
- }
- }
-
- return nil, fmt.Errorf("incompatible type")
-}
-
-func unwrapNonNull(t types.Type) (types.Type, bool) {
- if nn, ok := t.(*types.NonNull); ok {
- return nn.OfType, true
- }
- return t, false
-}
-
-func stripUnderscore(s string) string {
- return strings.Replace(s, "_", "", -1)
-}
-
-// NullUnmarshaller is an unmarshaller that can handle a nil input
-type NullUnmarshaller interface {
- decode.Unmarshaler
- Nullable()
-}
-
-func isNullable(t reflect.Type) bool {
- _, ok := reflect.New(t).Interface().(NullUnmarshaller)
- return ok
-}
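The packer deleted above maps GraphQL input values onto Go argument structs: field names are matched case-insensitively with underscores stripped, pointer fields express nullability, and defaults declared in the schema are filled in by Finish. A small sketch, under the assumption of an illustrative Filter input and root resolver (not from this repository):

package main

import (
    "context"
    "fmt"

    graphql "github.com/graph-gophers/graphql-go"
)

const sdl = `
    schema { query: Query }
    input Filter { text: String! limit: Int = 10 }
    type Query { search(filter: Filter!): [String!]! }
`

// filter mirrors the input object; Limit is int32 because GraphQL Int is 32-bit,
// and a pointer field here would mark the corresponding input field as nullable.
type filter struct {
    Text  string
    Limit int32
}

type root struct{}

func (*root) Search(ctx context.Context, args struct{ Filter filter }) []string {
    // Limit arrives as 10 when omitted in the query, courtesy of the schema default.
    return []string{fmt.Sprintf("%s (limit %d)", args.Filter.Text, args.Filter.Limit)}
}

func main() {
    s := graphql.MustParseSchema(sdl, &root{})
    resp := s.Exec(context.Background(), `{ search(filter: {text: "go"}) }`, "", nil)
    fmt.Println(string(resp.Data))
}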
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/meta.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/meta.go
deleted file mode 100644
index 02d5e262..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/meta.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package resolvable
-
-import (
- "reflect"
-
- "github.com/graph-gophers/graphql-go/introspection"
- "github.com/graph-gophers/graphql-go/types"
-)
-
-// Meta defines the details of the metadata schema for introspection.
-type Meta struct {
- FieldSchema Field
- FieldType Field
- FieldTypename Field
- Schema *Object
- Type *Object
-}
-
-func newMeta(s *types.Schema) *Meta {
- var err error
- b := newBuilder(s)
-
- metaSchema := s.Types["__Schema"].(*types.ObjectTypeDefinition)
- so, err := b.makeObjectExec(metaSchema.Name, metaSchema.Fields, nil, false, reflect.TypeOf(&introspection.Schema{}))
- if err != nil {
- panic(err)
- }
-
- metaType := s.Types["__Type"].(*types.ObjectTypeDefinition)
- t, err := b.makeObjectExec(metaType.Name, metaType.Fields, nil, false, reflect.TypeOf(&introspection.Type{}))
- if err != nil {
- panic(err)
- }
-
- if err := b.finish(); err != nil {
- panic(err)
- }
-
- fieldTypename := Field{
- FieldDefinition: types.FieldDefinition{
- Name: "__typename",
- Type: &types.NonNull{OfType: s.Types["String"]},
- },
- TraceLabel: "GraphQL field: __typename",
- }
-
- fieldSchema := Field{
- FieldDefinition: types.FieldDefinition{
- Name: "__schema",
- Type: s.Types["__Schema"],
- },
- TraceLabel: "GraphQL field: __schema",
- }
-
- fieldType := Field{
- FieldDefinition: types.FieldDefinition{
- Name: "__type",
- Type: s.Types["__Type"],
- },
- TraceLabel: "GraphQL field: __type",
- }
-
- return &Meta{
- FieldSchema: fieldSchema,
- FieldTypename: fieldTypename,
- FieldType: fieldType,
- Schema: so,
- Type: t,
- }
-}
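meta.go above wires the __schema, __type and __typename fields onto the introspection resolvers, so any parsed schema can answer introspection queries without user code. A minimal sketch using the public API with an illustrative one-field schema:

package main

import (
    "context"
    "fmt"

    graphql "github.com/graph-gophers/graphql-go"
)

type root struct{}

func (*root) Ping() string { return "pong" }

func main() {
    s := graphql.MustParseSchema(`schema { query: Query } type Query { ping: String! }`, &root{})
    // __schema and __type are served by the meta schema; __typename stays available
    // even when introspection is disabled, since unions and interfaces need it.
    q := `{ __schema { queryType { name } } __type(name: "Query") { kind fields { name } } }`
    fmt.Println(string(s.Exec(context.Background(), q, "", nil).Data))
}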
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/resolvable.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/resolvable.go
deleted file mode 100644
index 3410f557..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/resolvable.go
+++ /dev/null
@@ -1,453 +0,0 @@
-package resolvable
-
-import (
- "context"
- "fmt"
- "reflect"
- "strings"
-
- "github.com/graph-gophers/graphql-go/decode"
- "github.com/graph-gophers/graphql-go/internal/exec/packer"
- "github.com/graph-gophers/graphql-go/types"
-)
-
-type Schema struct {
- *Meta
- types.Schema
- Query Resolvable
- Mutation Resolvable
- Subscription Resolvable
- Resolver reflect.Value
-}
-
-type Resolvable interface {
- isResolvable()
-}
-
-type Object struct {
- Name string
- Fields map[string]*Field
- TypeAssertions map[string]*TypeAssertion
-}
-
-type Field struct {
- types.FieldDefinition
- TypeName string
- MethodIndex int
- FieldIndex []int
- HasContext bool
- HasError bool
- ArgsPacker *packer.StructPacker
- ValueExec Resolvable
- TraceLabel string
-}
-
-func (f *Field) UseMethodResolver() bool {
- return len(f.FieldIndex) == 0
-}
-
-type TypeAssertion struct {
- MethodIndex int
- TypeExec Resolvable
-}
-
-type List struct {
- Elem Resolvable
-}
-
-type Scalar struct{}
-
-func (*Object) isResolvable() {}
-func (*List) isResolvable() {}
-func (*Scalar) isResolvable() {}
-
-func ApplyResolver(s *types.Schema, resolver interface{}) (*Schema, error) {
- if resolver == nil {
- return &Schema{Meta: newMeta(s), Schema: *s}, nil
- }
-
- b := newBuilder(s)
-
- var query, mutation, subscription Resolvable
-
- if t, ok := s.EntryPoints["query"]; ok {
- if err := b.assignExec(&query, t, reflect.TypeOf(resolver)); err != nil {
- return nil, err
- }
- }
-
- if t, ok := s.EntryPoints["mutation"]; ok {
- if err := b.assignExec(&mutation, t, reflect.TypeOf(resolver)); err != nil {
- return nil, err
- }
- }
-
- if t, ok := s.EntryPoints["subscription"]; ok {
- if err := b.assignExec(&subscription, t, reflect.TypeOf(resolver)); err != nil {
- return nil, err
- }
- }
-
- if err := b.finish(); err != nil {
- return nil, err
- }
-
- return &Schema{
- Meta: newMeta(s),
- Schema: *s,
- Resolver: reflect.ValueOf(resolver),
- Query: query,
- Mutation: mutation,
- Subscription: subscription,
- }, nil
-}
-
-type execBuilder struct {
- schema *types.Schema
- resMap map[typePair]*resMapEntry
- packerBuilder *packer.Builder
-}
-
-type typePair struct {
- graphQLType types.Type
- resolverType reflect.Type
-}
-
-type resMapEntry struct {
- exec Resolvable
- targets []*Resolvable
-}
-
-func newBuilder(s *types.Schema) *execBuilder {
- return &execBuilder{
- schema: s,
- resMap: make(map[typePair]*resMapEntry),
- packerBuilder: packer.NewBuilder(),
- }
-}
-
-func (b *execBuilder) finish() error {
- for _, entry := range b.resMap {
- for _, target := range entry.targets {
- *target = entry.exec
- }
- }
-
- return b.packerBuilder.Finish()
-}
-
-func (b *execBuilder) assignExec(target *Resolvable, t types.Type, resolverType reflect.Type) error {
- k := typePair{t, resolverType}
- ref, ok := b.resMap[k]
- if !ok {
- ref = &resMapEntry{}
- b.resMap[k] = ref
- var err error
- ref.exec, err = b.makeExec(t, resolverType)
- if err != nil {
- return err
- }
- }
- ref.targets = append(ref.targets, target)
- return nil
-}
-
-func (b *execBuilder) makeExec(t types.Type, resolverType reflect.Type) (Resolvable, error) {
- var nonNull bool
- t, nonNull = unwrapNonNull(t)
-
- switch t := t.(type) {
- case *types.ObjectTypeDefinition:
- return b.makeObjectExec(t.Name, t.Fields, nil, nonNull, resolverType)
-
- case *types.InterfaceTypeDefinition:
- return b.makeObjectExec(t.Name, t.Fields, t.PossibleTypes, nonNull, resolverType)
-
- case *types.Union:
- return b.makeObjectExec(t.Name, nil, t.UnionMemberTypes, nonNull, resolverType)
- }
-
- if !nonNull {
- if resolverType.Kind() != reflect.Ptr {
- return nil, fmt.Errorf("%s is not a pointer", resolverType)
- }
- resolverType = resolverType.Elem()
- }
-
- switch t := t.(type) {
- case *types.ScalarTypeDefinition:
- return makeScalarExec(t, resolverType)
-
- case *types.EnumTypeDefinition:
- return &Scalar{}, nil
-
- case *types.List:
- if resolverType.Kind() != reflect.Slice {
- return nil, fmt.Errorf("%s is not a slice", resolverType)
- }
- e := &List{}
- if err := b.assignExec(&e.Elem, t.OfType, resolverType.Elem()); err != nil {
- return nil, err
- }
- return e, nil
-
- default:
- panic("invalid type: " + t.String())
- }
-}
-
-func makeScalarExec(t *types.ScalarTypeDefinition, resolverType reflect.Type) (Resolvable, error) {
- implementsType := false
- switch r := reflect.New(resolverType).Interface().(type) {
- case *int32:
- implementsType = t.Name == "Int"
- case *float64:
- implementsType = t.Name == "Float"
- case *string:
- implementsType = t.Name == "String"
- case *bool:
- implementsType = t.Name == "Boolean"
- case decode.Unmarshaler:
- implementsType = r.ImplementsGraphQLType(t.Name)
- }
-
- if !implementsType {
- return nil, fmt.Errorf("can not use %s as %s", resolverType, t.Name)
- }
- return &Scalar{}, nil
-}
-
-func (b *execBuilder) makeObjectExec(typeName string, fields types.FieldsDefinition, possibleTypes []*types.ObjectTypeDefinition,
- nonNull bool, resolverType reflect.Type) (*Object, error) {
- if !nonNull {
- if resolverType.Kind() != reflect.Ptr && resolverType.Kind() != reflect.Interface {
- return nil, fmt.Errorf("%s is not a pointer or interface", resolverType)
- }
- }
-
- methodHasReceiver := resolverType.Kind() != reflect.Interface
-
- Fields := make(map[string]*Field)
- rt := unwrapPtr(resolverType)
- fieldsCount := fieldCount(rt, map[string]int{})
- for _, f := range fields {
- var fieldIndex []int
- methodIndex := findMethod(resolverType, f.Name)
- if b.schema.UseFieldResolvers && methodIndex == -1 {
- if fieldsCount[strings.ToLower(stripUnderscore(f.Name))] > 1 {
- return nil, fmt.Errorf("%s does not resolve %q: ambiguous field %q", resolverType, typeName, f.Name)
- }
- fieldIndex = findField(rt, f.Name, []int{})
- }
- if methodIndex == -1 && len(fieldIndex) == 0 {
- hint := ""
- if findMethod(reflect.PtrTo(resolverType), f.Name) != -1 {
- hint = " (hint: the method exists on the pointer type)"
- }
- return nil, fmt.Errorf("%s does not resolve %q: missing method for field %q%s", resolverType, typeName, f.Name, hint)
- }
-
- var m reflect.Method
- var sf reflect.StructField
- if methodIndex != -1 {
- m = resolverType.Method(methodIndex)
- } else {
- sf = rt.FieldByIndex(fieldIndex)
- }
- fe, err := b.makeFieldExec(typeName, f, m, sf, methodIndex, fieldIndex, methodHasReceiver)
- if err != nil {
- var resolverName string
- if methodIndex != -1 {
- resolverName = m.Name
- } else {
- resolverName = sf.Name
- }
- return nil, fmt.Errorf("%s\n\tused by (%s).%s", err, resolverType, resolverName)
- }
- Fields[f.Name] = fe
- }
-
- // Check type assertions when
- // 1) using method resolvers
- // 2) Or resolver is not an interface type
- typeAssertions := make(map[string]*TypeAssertion)
- if !b.schema.UseFieldResolvers || resolverType.Kind() != reflect.Interface {
- for _, impl := range possibleTypes {
- methodIndex := findMethod(resolverType, "To"+impl.Name)
- if methodIndex == -1 {
- return nil, fmt.Errorf("%s does not resolve %q: missing method %q to convert to %q", resolverType, typeName, "To"+impl.Name, impl.Name)
- }
- if resolverType.Method(methodIndex).Type.NumOut() != 2 {
- return nil, fmt.Errorf("%s does not resolve %q: method %q should return a value and a bool indicating success", resolverType, typeName, "To"+impl.Name)
- }
- a := &TypeAssertion{
- MethodIndex: methodIndex,
- }
- if err := b.assignExec(&a.TypeExec, impl, resolverType.Method(methodIndex).Type.Out(0)); err != nil {
- return nil, err
- }
- typeAssertions[impl.Name] = a
- }
- }
-
- return &Object{
- Name: typeName,
- Fields: Fields,
- TypeAssertions: typeAssertions,
- }, nil
-}
-
-var contextType = reflect.TypeOf((*context.Context)(nil)).Elem()
-var errorType = reflect.TypeOf((*error)(nil)).Elem()
-
-func (b *execBuilder) makeFieldExec(typeName string, f *types.FieldDefinition, m reflect.Method, sf reflect.StructField,
- methodIndex int, fieldIndex []int, methodHasReceiver bool) (*Field, error) {
-
- var argsPacker *packer.StructPacker
- var hasError bool
- var hasContext bool
-
- // Validate resolver method only when there is one
- if methodIndex != -1 {
- in := make([]reflect.Type, m.Type.NumIn())
- for i := range in {
- in[i] = m.Type.In(i)
- }
- if methodHasReceiver {
- in = in[1:] // first parameter is receiver
- }
-
- hasContext = len(in) > 0 && in[0] == contextType
- if hasContext {
- in = in[1:]
- }
-
- if len(f.Arguments) > 0 {
- if len(in) == 0 {
- return nil, fmt.Errorf("must have parameter for field arguments")
- }
- var err error
- argsPacker, err = b.packerBuilder.MakeStructPacker(f.Arguments, in[0])
- if err != nil {
- return nil, err
- }
- in = in[1:]
- }
-
- if len(in) > 0 {
- return nil, fmt.Errorf("too many parameters")
- }
-
- maxNumOfReturns := 2
- if m.Type.NumOut() < maxNumOfReturns-1 {
- return nil, fmt.Errorf("too few return values")
- }
-
- if m.Type.NumOut() > maxNumOfReturns {
- return nil, fmt.Errorf("too many return values")
- }
-
- hasError = m.Type.NumOut() == maxNumOfReturns
- if hasError {
- if m.Type.Out(maxNumOfReturns-1) != errorType {
- return nil, fmt.Errorf(`must have "error" as its last return value`)
- }
- }
- }
-
- fe := &Field{
- FieldDefinition: *f,
- TypeName: typeName,
- MethodIndex: methodIndex,
- FieldIndex: fieldIndex,
- HasContext: hasContext,
- ArgsPacker: argsPacker,
- HasError: hasError,
- TraceLabel: fmt.Sprintf("GraphQL field: %s.%s", typeName, f.Name),
- }
-
- var out reflect.Type
- if methodIndex != -1 {
- out = m.Type.Out(0)
- sub, ok := b.schema.EntryPoints["subscription"]
- if ok && typeName == sub.TypeName() && out.Kind() == reflect.Chan {
- out = m.Type.Out(0).Elem()
- }
- } else {
- out = sf.Type
- }
- if err := b.assignExec(&fe.ValueExec, f.Type, out); err != nil {
- return nil, err
- }
-
- return fe, nil
-}
-
-func findMethod(t reflect.Type, name string) int {
- for i := 0; i < t.NumMethod(); i++ {
- if strings.EqualFold(stripUnderscore(name), stripUnderscore(t.Method(i).Name)) {
- return i
- }
- }
- return -1
-}
-
-func findField(t reflect.Type, name string, index []int) []int {
- for i := 0; i < t.NumField(); i++ {
- field := t.Field(i)
-
- if field.Type.Kind() == reflect.Struct && field.Anonymous {
- newIndex := findField(field.Type, name, []int{i})
- if len(newIndex) > 1 {
- return append(index, newIndex...)
- }
- }
-
- if strings.EqualFold(stripUnderscore(name), stripUnderscore(field.Name)) {
- return append(index, i)
- }
- }
-
- return index
-}
-
-// fieldCount helps resolve ambiguity when more than one embedded struct contains fields with the same name.
-func fieldCount(t reflect.Type, count map[string]int) map[string]int {
- if t.Kind() != reflect.Struct {
- return nil
- }
-
- for i := 0; i < t.NumField(); i++ {
- field := t.Field(i)
- fieldName := strings.ToLower(stripUnderscore(field.Name))
-
- if field.Type.Kind() == reflect.Struct && field.Anonymous {
- count = fieldCount(field.Type, count)
- } else {
- if _, ok := count[fieldName]; !ok {
- count[fieldName] = 0
- }
- count[fieldName]++
- }
- }
-
- return count
-}
-
-func unwrapNonNull(t types.Type) (types.Type, bool) {
- if nn, ok := t.(*types.NonNull); ok {
- return nn.OfType, true
- }
- return t, false
-}
-
-func stripUnderscore(s string) string {
- return strings.Replace(s, "_", "", -1)
-}
-
-func unwrapPtr(t reflect.Type) reflect.Type {
- if t.Kind() == reflect.Ptr {
- return t.Elem()
- }
- return t
-}
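The builder in resolvable.go above binds each schema field to either a resolver method or, with UseFieldResolvers, an exported struct field, and resolves interface and union members through "To" + type-name assertion methods that return a value and an ok bool. A sketch of the resolver shapes it expects; the Node/User schema and resolver names are illustrative assumptions, not from this repository:

package main

import (
    "context"
    "fmt"

    graphql "github.com/graph-gophers/graphql-go"
)

const sdl = `
    schema { query: Query }
    interface Node { id: ID! }
    type User implements Node { id: ID! name: String! }
    type Query { node: Node! }
`

// userResolver relies on field resolvers (UseFieldResolvers) for id and name.
type userResolver struct {
    ID   graphql.ID
    Name string
}

// nodeResolver backs the Node interface; ToUser is the type-assertion method
// the builder looks for when an inline fragment narrows Node to User.
type nodeResolver struct{ u *userResolver }

func (r *nodeResolver) ID() graphql.ID                { return r.u.ID }
func (r *nodeResolver) ToUser() (*userResolver, bool) { return r.u, true }

type root struct{}

func (*root) Node() *nodeResolver {
    return &nodeResolver{u: &userResolver{ID: "1", Name: "gopher"}}
}

func main() {
    s := graphql.MustParseSchema(sdl, &root{}, graphql.UseFieldResolvers())
    q := `{ node { id ... on User { name } } }`
    fmt.Println(string(s.Exec(context.Background(), q, "", nil).Data))
}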
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/selected/selected.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/selected/selected.go
deleted file mode 100644
index 9b96d2b6..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/internal/exec/selected/selected.go
+++ /dev/null
@@ -1,269 +0,0 @@
-package selected
-
-import (
- "fmt"
- "reflect"
- "sync"
-
- "github.com/graph-gophers/graphql-go/errors"
- "github.com/graph-gophers/graphql-go/internal/exec/packer"
- "github.com/graph-gophers/graphql-go/internal/exec/resolvable"
- "github.com/graph-gophers/graphql-go/internal/query"
- "github.com/graph-gophers/graphql-go/introspection"
- "github.com/graph-gophers/graphql-go/types"
-)
-
-type Request struct {
- Schema *types.Schema
- Doc *types.ExecutableDefinition
- Vars map[string]interface{}
- Mu sync.Mutex
- Errs []*errors.QueryError
- DisableIntrospection bool
-}
-
-func (r *Request) AddError(err *errors.QueryError) {
- r.Mu.Lock()
- r.Errs = append(r.Errs, err)
- r.Mu.Unlock()
-}
-
-func ApplyOperation(r *Request, s *resolvable.Schema, op *types.OperationDefinition) []Selection {
- var obj *resolvable.Object
- switch op.Type {
- case query.Query:
- obj = s.Query.(*resolvable.Object)
- case query.Mutation:
- obj = s.Mutation.(*resolvable.Object)
- case query.Subscription:
- obj = s.Subscription.(*resolvable.Object)
- }
- return applySelectionSet(r, s, obj, op.Selections)
-}
-
-type Selection interface {
- isSelection()
-}
-
-type SchemaField struct {
- resolvable.Field
- Alias string
- Args map[string]interface{}
- PackedArgs reflect.Value
- Sels []Selection
- Async bool
- FixedResult reflect.Value
-}
-
-type TypeAssertion struct {
- resolvable.TypeAssertion
- Sels []Selection
-}
-
-type TypenameField struct {
- resolvable.Object
- Alias string
-}
-
-func (*SchemaField) isSelection() {}
-func (*TypeAssertion) isSelection() {}
-func (*TypenameField) isSelection() {}
-
-func applySelectionSet(r *Request, s *resolvable.Schema, e *resolvable.Object, sels []types.Selection) (flattenedSels []Selection) {
- for _, sel := range sels {
- switch sel := sel.(type) {
- case *types.Field:
- field := sel
- if skipByDirective(r, field.Directives) {
- continue
- }
-
- switch field.Name.Name {
- case "__typename":
- // __typename is available even though r.DisableIntrospection == true
- // because it is necessary when using union types and interfaces: https://graphql.org/learn/schema/#union-types
- flattenedSels = append(flattenedSels, &TypenameField{
- Object: *e,
- Alias: field.Alias.Name,
- })
-
- case "__schema":
- if !r.DisableIntrospection {
- flattenedSels = append(flattenedSels, &SchemaField{
- Field: s.Meta.FieldSchema,
- Alias: field.Alias.Name,
- Sels: applySelectionSet(r, s, s.Meta.Schema, field.SelectionSet),
- Async: true,
- FixedResult: reflect.ValueOf(introspection.WrapSchema(r.Schema)),
- })
- }
-
- case "__type":
- if !r.DisableIntrospection {
- p := packer.ValuePacker{ValueType: reflect.TypeOf("")}
- v, err := p.Pack(field.Arguments.MustGet("name").Deserialize(r.Vars))
- if err != nil {
- r.AddError(errors.Errorf("%s", err))
- return nil
- }
-
- t, ok := r.Schema.Types[v.String()]
- if !ok {
- return nil
- }
-
- flattenedSels = append(flattenedSels, &SchemaField{
- Field: s.Meta.FieldType,
- Alias: field.Alias.Name,
- Sels: applySelectionSet(r, s, s.Meta.Type, field.SelectionSet),
- Async: true,
- FixedResult: reflect.ValueOf(introspection.WrapType(t)),
- })
- }
-
- default:
- fe := e.Fields[field.Name.Name]
-
- var args map[string]interface{}
- var packedArgs reflect.Value
- if fe.ArgsPacker != nil {
- args = make(map[string]interface{})
- for _, arg := range field.Arguments {
- args[arg.Name.Name] = arg.Value.Deserialize(r.Vars)
- }
- var err error
- packedArgs, err = fe.ArgsPacker.Pack(args)
- if err != nil {
- r.AddError(errors.Errorf("%s", err))
- return
- }
- }
-
- fieldSels := applyField(r, s, fe.ValueExec, field.SelectionSet)
- flattenedSels = append(flattenedSels, &SchemaField{
- Field: *fe,
- Alias: field.Alias.Name,
- Args: args,
- PackedArgs: packedArgs,
- Sels: fieldSels,
- Async: fe.HasContext || fe.ArgsPacker != nil || fe.HasError || HasAsyncSel(fieldSels),
- })
- }
-
- case *types.InlineFragment:
- frag := sel
- if skipByDirective(r, frag.Directives) {
- continue
- }
- flattenedSels = append(flattenedSels, applyFragment(r, s, e, &frag.Fragment)...)
-
- case *types.FragmentSpread:
- spread := sel
- if skipByDirective(r, spread.Directives) {
- continue
- }
- flattenedSels = append(flattenedSels, applyFragment(r, s, e, &r.Doc.Fragments.Get(spread.Name.Name).Fragment)...)
-
- default:
- panic("invalid type")
- }
- }
- return
-}
-
-func applyFragment(r *Request, s *resolvable.Schema, e *resolvable.Object, frag *types.Fragment) []Selection {
- if frag.On.Name != e.Name {
- t := r.Schema.Resolve(frag.On.Name)
- face, ok := t.(*types.InterfaceTypeDefinition)
- if !ok && frag.On.Name != "" {
- a, ok2 := e.TypeAssertions[frag.On.Name]
- if !ok2 {
- panic(fmt.Errorf("%q does not implement %q", frag.On, e.Name)) // TODO proper error handling
- }
-
- return []Selection{&TypeAssertion{
- TypeAssertion: *a,
- Sels: applySelectionSet(r, s, a.TypeExec.(*resolvable.Object), frag.Selections),
- }}
- }
- if ok && len(face.PossibleTypes) > 0 {
- sels := []Selection{}
- for _, t := range face.PossibleTypes {
- if t.Name == e.Name {
- return applySelectionSet(r, s, e, frag.Selections)
- }
-
- if a, ok := e.TypeAssertions[t.Name]; ok {
- sels = append(sels, &TypeAssertion{
- TypeAssertion: *a,
- Sels: applySelectionSet(r, s, a.TypeExec.(*resolvable.Object), frag.Selections),
- })
- }
- }
- if len(sels) == 0 {
- panic(fmt.Errorf("%q does not implement %q", e.Name, frag.On)) // TODO proper error handling
- }
- return sels
- }
- }
- return applySelectionSet(r, s, e, frag.Selections)
-}
-
-func applyField(r *Request, s *resolvable.Schema, e resolvable.Resolvable, sels []types.Selection) []Selection {
- switch e := e.(type) {
- case *resolvable.Object:
- return applySelectionSet(r, s, e, sels)
- case *resolvable.List:
- return applyField(r, s, e.Elem, sels)
- case *resolvable.Scalar:
- return nil
- default:
- panic("unreachable")
- }
-}
-
-func skipByDirective(r *Request, directives types.DirectiveList) bool {
- if d := directives.Get("skip"); d != nil {
- p := packer.ValuePacker{ValueType: reflect.TypeOf(false)}
- v, err := p.Pack(d.Arguments.MustGet("if").Deserialize(r.Vars))
- if err != nil {
- r.AddError(errors.Errorf("%s", err))
- }
- if err == nil && v.Bool() {
- return true
- }
- }
-
- if d := directives.Get("include"); d != nil {
- p := packer.ValuePacker{ValueType: reflect.TypeOf(false)}
- v, err := p.Pack(d.Arguments.MustGet("if").Deserialize(r.Vars))
- if err != nil {
- r.AddError(errors.Errorf("%s", err))
- }
- if err == nil && !v.Bool() {
- return true
- }
- }
-
- return false
-}
-
-func HasAsyncSel(sels []Selection) bool {
- for _, sel := range sels {
- switch sel := sel.(type) {
- case *SchemaField:
- if sel.Async {
- return true
- }
- case *TypeAssertion:
- if HasAsyncSel(sel.Sels) {
- return true
- }
- case *TypenameField:
- // sync
- default:
- panic("unreachable")
- }
- }
- return false
-}
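selected.go above flattens fragment spreads and inline fragments into a single selection list and evaluates the @skip and @include directives (including variable-driven ones) before execution, marking fields async when they take context, arguments or can error. A brief sketch of a query that exercises that path, with an illustrative two-field schema:

package main

import (
    "context"
    "fmt"

    graphql "github.com/graph-gophers/graphql-go"
)

type root struct{}

func (*root) Hero() string  { return "R2-D2" }
func (*root) Extra() string { return "rarely needed" }

func main() {
    s := graphql.MustParseSchema(`schema { query: Query } type Query { hero: String! extra: String! }`, &root{})
    // The fragment is flattened into the selection set; extra is dropped when $withExtra is false.
    q := `
        query ($withExtra: Boolean!) {
            ...core
            extra @include(if: $withExtra)
        }
        fragment core on Query { hero }
    `
    vars := map[string]interface{}{"withExtra": false}
    fmt.Println(string(s.Exec(context.Background(), q, "", vars).Data))
}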
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/subscribe.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/subscribe.go
deleted file mode 100644
index 37ebacbc..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/internal/exec/subscribe.go
+++ /dev/null
@@ -1,179 +0,0 @@
-package exec
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "reflect"
- "time"
-
- "github.com/graph-gophers/graphql-go/errors"
- "github.com/graph-gophers/graphql-go/internal/exec/resolvable"
- "github.com/graph-gophers/graphql-go/internal/exec/selected"
- "github.com/graph-gophers/graphql-go/types"
-)
-
-type Response struct {
- Data json.RawMessage
- Errors []*errors.QueryError
-}
-
-func (r *Request) Subscribe(ctx context.Context, s *resolvable.Schema, op *types.OperationDefinition) <-chan *Response {
- var result reflect.Value
- var f *fieldToExec
- var err *errors.QueryError
- func() {
- defer r.handlePanic(ctx)
-
- sels := selected.ApplyOperation(&r.Request, s, op)
- var fields []*fieldToExec
- collectFieldsToResolve(sels, s, s.Resolver, &fields, make(map[string]*fieldToExec))
-
- // TODO: move this check into validation.Validate
- if len(fields) != 1 {
- err = errors.Errorf("%s", "can subscribe to at most one subscription at a time")
- return
- }
- f = fields[0]
-
- var in []reflect.Value
- if f.field.HasContext {
- in = append(in, reflect.ValueOf(ctx))
- }
- if f.field.ArgsPacker != nil {
- in = append(in, f.field.PackedArgs)
- }
- callOut := f.resolver.Method(f.field.MethodIndex).Call(in)
- result = callOut[0]
-
- if f.field.HasError && !callOut[1].IsNil() {
- switch resolverErr := callOut[1].Interface().(type) {
- case *errors.QueryError:
- err = resolverErr
- case error:
- err = errors.Errorf("%s", resolverErr)
- err.ResolverError = resolverErr
- default:
- panic(fmt.Errorf("can only deal with *QueryError and error types, got %T", resolverErr))
- }
- }
- }()
-
- // Handles the case where the locally executed func above panicked
- if len(r.Request.Errs) > 0 {
- return sendAndReturnClosed(&Response{Errors: r.Request.Errs})
- }
-
- if f == nil {
- return sendAndReturnClosed(&Response{Errors: []*errors.QueryError{err}})
- }
-
- if err != nil {
- if _, nonNullChild := f.field.Type.(*types.NonNull); nonNullChild {
- return sendAndReturnClosed(&Response{Errors: []*errors.QueryError{err}})
- }
- return sendAndReturnClosed(&Response{Data: []byte(fmt.Sprintf(`{"%s":null}`, f.field.Alias)), Errors: []*errors.QueryError{err}})
- }
-
- if ctxErr := ctx.Err(); ctxErr != nil {
- return sendAndReturnClosed(&Response{Errors: []*errors.QueryError{errors.Errorf("%s", ctxErr)}})
- }
-
- c := make(chan *Response)
- // TODO: handle resolver nil channel better?
- if result.IsZero() {
- close(c)
- return c
- }
-
- go func() {
- for {
- // Check subscription context
- chosen, resp, ok := reflect.Select([]reflect.SelectCase{
- {
- Dir: reflect.SelectRecv,
- Chan: reflect.ValueOf(ctx.Done()),
- },
- {
- Dir: reflect.SelectRecv,
- Chan: result,
- },
- })
- switch chosen {
- // subscription context done
- case 0:
- close(c)
- return
- // upstream received
- case 1:
- // upstream closed
- if !ok {
- close(c)
- return
- }
-
- subR := &Request{
- Request: selected.Request{
- Doc: r.Request.Doc,
- Vars: r.Request.Vars,
- Schema: r.Request.Schema,
- },
- Limiter: r.Limiter,
- Tracer: r.Tracer,
- Logger: r.Logger,
- }
- var out bytes.Buffer
- func() {
- timeout := r.SubscribeResolverTimeout
- if timeout == 0 {
- timeout = time.Second
- }
-
- subCtx, cancel := context.WithTimeout(ctx, timeout)
- defer cancel()
-
- // resolve response
- func() {
- defer subR.handlePanic(subCtx)
-
- var buf bytes.Buffer
- subR.execSelectionSet(subCtx, f.sels, f.field.Type, &pathSegment{nil, f.field.Alias}, s, resp, &buf)
-
- propagateChildError := false
- if _, nonNullChild := f.field.Type.(*types.NonNull); nonNullChild && resolvedToNull(&buf) {
- propagateChildError = true
- }
-
- if !propagateChildError {
- out.WriteString(fmt.Sprintf(`{"%s":`, f.field.Alias))
- out.Write(buf.Bytes())
- out.WriteString(`}`)
- }
- }()
-
- if err := subCtx.Err(); err != nil {
- c <- &Response{Errors: []*errors.QueryError{errors.Errorf("%s", err)}}
- return
- }
-
- // Send response within timeout
- // TODO: maybe block until sent?
- select {
- case <-subCtx.Done():
- case c <- &Response{Data: out.Bytes(), Errors: subR.Errs}:
- }
- }()
- }
- }
- }()
-
- return c
-}
-
-func sendAndReturnClosed(resp *Response) chan *Response {
- c := make(chan *Response, 1)
- c <- resp
- close(c)
- return c
-}
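subscribe.go above drives subscriptions: the resolver method returns a channel, the executor selects on it and the subscription context, and each received value is resolved against the selection set within SubscribeResolverTimeout (defaulting to one second). A rough sketch, assuming an illustrative Ticks subscription and that the public Subscribe channel carries the library's Response values as in its examples:

package main

import (
    "context"
    "fmt"

    graphql "github.com/graph-gophers/graphql-go"
)

const sdl = `
    schema { query: Query subscription: Subscription }
    type Query { ok: Boolean! }
    type Subscription { ticks: Int! }
`

type root struct{}

func (*root) Ok() bool { return true }

// Ticks returns a receive-only channel; closing it ends the subscription stream.
func (*root) Ticks(ctx context.Context) <-chan int32 {
    c := make(chan int32, 3)
    for i := int32(1); i <= 3; i++ {
        c <- i
    }
    close(c)
    return c
}

func main() {
    s := graphql.MustParseSchema(sdl, &root{})
    events, err := s.Subscribe(context.Background(), `subscription { ticks }`, "", nil)
    if err != nil {
        panic(err)
    }
    for ev := range events {
        // Each event is one resolved response; printed generically here.
        fmt.Printf("%+v\n", ev)
    }
}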
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/query/query.go b/vendor/github.com/graph-gophers/graphql-go/internal/query/query.go
deleted file mode 100644
index ca0400cd..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/internal/query/query.go
+++ /dev/null
@@ -1,156 +0,0 @@
-package query
-
-import (
- "fmt"
- "text/scanner"
-
- "github.com/graph-gophers/graphql-go/errors"
- "github.com/graph-gophers/graphql-go/internal/common"
- "github.com/graph-gophers/graphql-go/types"
-)
-
-const (
- Query types.OperationType = "QUERY"
- Mutation types.OperationType = "MUTATION"
- Subscription types.OperationType = "SUBSCRIPTION"
-)
-
-func Parse(queryString string) (*types.ExecutableDefinition, *errors.QueryError) {
- l := common.NewLexer(queryString, false)
-
- var execDef *types.ExecutableDefinition
- err := l.CatchSyntaxError(func() { execDef = parseExecutableDefinition(l) })
- if err != nil {
- return nil, err
- }
-
- return execDef, nil
-}
-
-func parseExecutableDefinition(l *common.Lexer) *types.ExecutableDefinition {
- ed := &types.ExecutableDefinition{}
- l.ConsumeWhitespace()
- for l.Peek() != scanner.EOF {
- if l.Peek() == '{' {
- op := &types.OperationDefinition{Type: Query, Loc: l.Location()}
- op.Selections = parseSelectionSet(l)
- ed.Operations = append(ed.Operations, op)
- continue
- }
-
- loc := l.Location()
- switch x := l.ConsumeIdent(); x {
- case "query":
- op := parseOperation(l, Query)
- op.Loc = loc
- ed.Operations = append(ed.Operations, op)
-
- case "mutation":
- ed.Operations = append(ed.Operations, parseOperation(l, Mutation))
-
- case "subscription":
- ed.Operations = append(ed.Operations, parseOperation(l, Subscription))
-
- case "fragment":
- frag := parseFragment(l)
- frag.Loc = loc
- ed.Fragments = append(ed.Fragments, frag)
-
- default:
- l.SyntaxError(fmt.Sprintf(`unexpected %q, expecting "fragment"`, x))
- }
- }
- return ed
-}
-
-func parseOperation(l *common.Lexer, opType types.OperationType) *types.OperationDefinition {
- op := &types.OperationDefinition{Type: opType}
- op.Name.Loc = l.Location()
- if l.Peek() == scanner.Ident {
- op.Name = l.ConsumeIdentWithLoc()
- }
- op.Directives = common.ParseDirectives(l)
- if l.Peek() == '(' {
- l.ConsumeToken('(')
- for l.Peek() != ')' {
- loc := l.Location()
- l.ConsumeToken('$')
- iv := common.ParseInputValue(l)
- iv.Loc = loc
- op.Vars = append(op.Vars, iv)
- }
- l.ConsumeToken(')')
- }
- op.Selections = parseSelectionSet(l)
- return op
-}
-
-func parseFragment(l *common.Lexer) *types.FragmentDefinition {
- f := &types.FragmentDefinition{}
- f.Name = l.ConsumeIdentWithLoc()
- l.ConsumeKeyword("on")
- f.On = types.TypeName{Ident: l.ConsumeIdentWithLoc()}
- f.Directives = common.ParseDirectives(l)
- f.Selections = parseSelectionSet(l)
- return f
-}
-
-func parseSelectionSet(l *common.Lexer) []types.Selection {
- var sels []types.Selection
- l.ConsumeToken('{')
- for l.Peek() != '}' {
- sels = append(sels, parseSelection(l))
- }
- l.ConsumeToken('}')
- return sels
-}
-
-func parseSelection(l *common.Lexer) types.Selection {
- if l.Peek() == '.' {
- return parseSpread(l)
- }
- return parseFieldDef(l)
-}
-
-func parseFieldDef(l *common.Lexer) *types.Field {
- f := &types.Field{}
- f.Alias = l.ConsumeIdentWithLoc()
- f.Name = f.Alias
- if l.Peek() == ':' {
- l.ConsumeToken(':')
- f.Name = l.ConsumeIdentWithLoc()
- }
- if l.Peek() == '(' {
- f.Arguments = common.ParseArgumentList(l)
- }
- f.Directives = common.ParseDirectives(l)
- if l.Peek() == '{' {
- f.SelectionSetLoc = l.Location()
- f.SelectionSet = parseSelectionSet(l)
- }
- return f
-}
-
-func parseSpread(l *common.Lexer) types.Selection {
- loc := l.Location()
- l.ConsumeToken('.')
- l.ConsumeToken('.')
- l.ConsumeToken('.')
-
- f := &types.InlineFragment{Loc: loc}
- if l.Peek() == scanner.Ident {
- ident := l.ConsumeIdentWithLoc()
- if ident.Name != "on" {
- fs := &types.FragmentSpread{
- Name: ident,
- Loc: loc,
- }
- fs.Directives = common.ParseDirectives(l)
- return fs
- }
- f.On = types.TypeName{Ident: l.ConsumeIdentWithLoc()}
- }
- f.Directives = common.ParseDirectives(l)
- f.Selections = parseSelectionSet(l)
- return f
-}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/schema/meta.go b/vendor/github.com/graph-gophers/graphql-go/internal/schema/meta.go
deleted file mode 100644
index 9f5bba56..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/internal/schema/meta.go
+++ /dev/null
@@ -1,203 +0,0 @@
-package schema
-
-import (
- "github.com/graph-gophers/graphql-go/types"
-)
-
-func init() {
- _ = newMeta()
-}
-
-// newMeta initializes an instance of the meta Schema.
-func newMeta() *types.Schema {
- s := &types.Schema{
- EntryPointNames: make(map[string]string),
- Types: make(map[string]types.NamedType),
- Directives: make(map[string]*types.DirectiveDefinition),
- }
-
- err := Parse(s, metaSrc, false)
- if err != nil {
- panic(err)
- }
- return s
-}
-
-var metaSrc = `
- # The ` + "`" + `Int` + "`" + ` scalar type represents non-fractional signed whole numeric values. Int can represent values between -(2^31) and 2^31 - 1.
- scalar Int
-
- # The ` + "`" + `Float` + "`" + ` scalar type represents signed double-precision fractional values as specified by [IEEE 754](http://en.wikipedia.org/wiki/IEEE_floating_point).
- scalar Float
-
- # The ` + "`" + `String` + "`" + ` scalar type represents textual data, represented as UTF-8 character sequences. The String type is most often used by GraphQL to represent free-form human-readable text.
- scalar String
-
- # The ` + "`" + `Boolean` + "`" + ` scalar type represents ` + "`" + `true` + "`" + ` or ` + "`" + `false` + "`" + `.
- scalar Boolean
-
- # The ` + "`" + `ID` + "`" + ` scalar type represents a unique identifier, often used to refetch an object or as key for a cache. The ID type appears in a JSON response as a String; however, it is not intended to be human-readable. When expected as an input type, any string (such as ` + "`" + `"4"` + "`" + `) or integer (such as ` + "`" + `4` + "`" + `) input value will be accepted as an ID.
- scalar ID
-
- # Directs the executor to include this field or fragment only when the ` + "`" + `if` + "`" + ` argument is true.
- directive @include(
- # Included when true.
- if: Boolean!
- ) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT
-
- # Directs the executor to skip this field or fragment when the ` + "`" + `if` + "`" + ` argument is true.
- directive @skip(
- # Skipped when true.
- if: Boolean!
- ) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT
-
- # Marks an element of a GraphQL schema as no longer supported.
- directive @deprecated(
- # Explains why this element was deprecated, usually also including a suggestion
- # for how to access supported similar data. Formatted in
- # [Markdown](https://daringfireball.net/projects/markdown/).
- reason: String = "No longer supported"
- ) on FIELD_DEFINITION | ENUM_VALUE
-
- # A Directive provides a way to describe alternate runtime execution and type validation behavior in a GraphQL document.
- #
- # In some cases, you need to provide options to alter GraphQL's execution behavior
- # in ways field arguments will not suffice, such as conditionally including or
- # skipping a field. Directives provide this by describing additional information
- # to the executor.
- type __Directive {
- name: String!
- description: String
- locations: [__DirectiveLocation!]!
- args: [__InputValue!]!
- }
-
- # A Directive can be adjacent to many parts of the GraphQL language, a
- # __DirectiveLocation describes one such possible adjacencies.
- enum __DirectiveLocation {
- # Location adjacent to a query operation.
- QUERY
- # Location adjacent to a mutation operation.
- MUTATION
- # Location adjacent to a subscription operation.
- SUBSCRIPTION
- # Location adjacent to a field.
- FIELD
- # Location adjacent to a fragment definition.
- FRAGMENT_DEFINITION
- # Location adjacent to a fragment spread.
- FRAGMENT_SPREAD
- # Location adjacent to an inline fragment.
- INLINE_FRAGMENT
- # Location adjacent to a schema definition.
- SCHEMA
- # Location adjacent to a scalar definition.
- SCALAR
- # Location adjacent to an object type definition.
- OBJECT
- # Location adjacent to a field definition.
- FIELD_DEFINITION
- # Location adjacent to an argument definition.
- ARGUMENT_DEFINITION
- # Location adjacent to an interface definition.
- INTERFACE
- # Location adjacent to a union definition.
- UNION
- # Location adjacent to an enum definition.
- ENUM
- # Location adjacent to an enum value definition.
- ENUM_VALUE
- # Location adjacent to an input object type definition.
- INPUT_OBJECT
- # Location adjacent to an input object field definition.
- INPUT_FIELD_DEFINITION
- }
-
- # One possible value for a given Enum. Enum values are unique values, not a
- # placeholder for a string or numeric value. However an Enum value is returned in
- # a JSON response as a string.
- type __EnumValue {
- name: String!
- description: String
- isDeprecated: Boolean!
- deprecationReason: String
- }
-
- # Object and Interface types are described by a list of Fields, each of which has
- # a name, potentially a list of arguments, and a return type.
- type __Field {
- name: String!
- description: String
- args: [__InputValue!]!
- type: __Type!
- isDeprecated: Boolean!
- deprecationReason: String
- }
-
- # Arguments provided to Fields or Directives and the input fields of an
- # InputObject are represented as Input Values which describe their type and
- # optionally a default value.
- type __InputValue {
- name: String!
- description: String
- type: __Type!
- # A GraphQL-formatted string representing the default value for this input value.
- defaultValue: String
- }
-
- # A GraphQL Schema defines the capabilities of a GraphQL server. It exposes all
- # available types and directives on the server, as well as the entry points for
- # query, mutation, and subscription operations.
- type __Schema {
- # A list of all types supported by this server.
- types: [__Type!]!
- # The type that query operations will be rooted at.
- queryType: __Type!
- # If this server supports mutation, the type that mutation operations will be rooted at.
- mutationType: __Type
- # If this server support subscription, the type that subscription operations will be rooted at.
- subscriptionType: __Type
- # A list of all directives supported by this server.
- directives: [__Directive!]!
- }
-
- # The fundamental unit of any GraphQL Schema is the type. There are many kinds of
- # types in GraphQL as represented by the ` + "`" + `__TypeKind` + "`" + ` enum.
- #
- # Depending on the kind of a type, certain fields describe information about that
- # type. Scalar types provide no information beyond a name and description, while
- # Enum types provide their values. Object and Interface types provide the fields
- # they describe. Abstract types, Union and Interface, provide the Object types
- # possible at runtime. List and NonNull types compose other types.
- type __Type {
- kind: __TypeKind!
- name: String
- description: String
- fields(includeDeprecated: Boolean = false): [__Field!]
- interfaces: [__Type!]
- possibleTypes: [__Type!]
- enumValues(includeDeprecated: Boolean = false): [__EnumValue!]
- inputFields: [__InputValue!]
- ofType: __Type
- }
-
- # An enum describing what kind of type a given ` + "`" + `__Type` + "`" + ` is.
- enum __TypeKind {
- # Indicates this type is a scalar.
- SCALAR
- # Indicates this type is an object. ` + "`" + `fields` + "`" + ` and ` + "`" + `interfaces` + "`" + ` are valid fields.
- OBJECT
- # Indicates this type is an interface. ` + "`" + `fields` + "`" + ` and ` + "`" + `possibleTypes` + "`" + ` are valid fields.
- INTERFACE
- # Indicates this type is a union. ` + "`" + `possibleTypes` + "`" + ` is a valid field.
- UNION
- # Indicates this type is an enum. ` + "`" + `enumValues` + "`" + ` is a valid field.
- ENUM
- # Indicates this type is an input object. ` + "`" + `inputFields` + "`" + ` is a valid field.
- INPUT_OBJECT
- # Indicates this type is a list. ` + "`" + `ofType` + "`" + ` is a valid field.
- LIST
- # Indicates this type is a non-null. ` + "`" + `ofType` + "`" + ` is a valid field.
- NON_NULL
- }
-`
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/schema/schema.go b/vendor/github.com/graph-gophers/graphql-go/internal/schema/schema.go
deleted file mode 100644
index fb301c46..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/internal/schema/schema.go
+++ /dev/null
@@ -1,586 +0,0 @@
-package schema
-
-import (
- "fmt"
- "text/scanner"
-
- "github.com/graph-gophers/graphql-go/errors"
- "github.com/graph-gophers/graphql-go/internal/common"
- "github.com/graph-gophers/graphql-go/types"
-)
-
-// New initializes an instance of Schema.
-func New() *types.Schema {
- s := &types.Schema{
- EntryPointNames: make(map[string]string),
- Types: make(map[string]types.NamedType),
- Directives: make(map[string]*types.DirectiveDefinition),
- }
- m := newMeta()
- for n, t := range m.Types {
- s.Types[n] = t
- }
- for n, d := range m.Directives {
- s.Directives[n] = d
- }
- return s
-}
-
-func Parse(s *types.Schema, schemaString string, useStringDescriptions bool) error {
- l := common.NewLexer(schemaString, useStringDescriptions)
- err := l.CatchSyntaxError(func() { parseSchema(s, l) })
- if err != nil {
- return err
- }
-
- if err := mergeExtensions(s); err != nil {
- return err
- }
-
- for _, t := range s.Types {
- if err := resolveNamedType(s, t); err != nil {
- return err
- }
- }
- for _, d := range s.Directives {
- for _, arg := range d.Arguments {
- t, err := common.ResolveType(arg.Type, s.Resolve)
- if err != nil {
- return err
- }
- arg.Type = t
- }
- }
-
- // https://graphql.github.io/graphql-spec/June2018/#sec-Root-Operation-Types
- // > While any type can be the root operation type for a GraphQL operation, the type system definition language can
- // > omit the schema definition when the query, mutation, and subscription root types are named Query, Mutation,
- // > and Subscription respectively.
- if len(s.EntryPointNames) == 0 {
- if _, ok := s.Types["Query"]; ok {
- s.EntryPointNames["query"] = "Query"
- }
- if _, ok := s.Types["Mutation"]; ok {
- s.EntryPointNames["mutation"] = "Mutation"
- }
- if _, ok := s.Types["Subscription"]; ok {
- s.EntryPointNames["subscription"] = "Subscription"
- }
- }
- s.EntryPoints = make(map[string]types.NamedType)
- for key, name := range s.EntryPointNames {
- t, ok := s.Types[name]
- if !ok {
- return errors.Errorf("type %q not found", name)
- }
- s.EntryPoints[key] = t
- }
-
- // Interface types need validation: https://spec.graphql.org/draft/#sec-Interfaces.Interfaces-Implementing-Interfaces
- for _, typeDef := range s.Types {
- switch t := typeDef.(type) {
- case *types.InterfaceTypeDefinition:
- for i, implements := range t.Interfaces {
- typ, ok := s.Types[implements.Name]
- if !ok {
- return errors.Errorf("interface %q not found", implements)
- }
- inteface, ok := typ.(*types.InterfaceTypeDefinition)
- if !ok {
- return errors.Errorf("type %q is not an interface", inteface)
- }
-
- for _, f := range inteface.Fields.Names() {
- if t.Fields.Get(f) == nil {
- return errors.Errorf("interface %q expects field %q but %q does not provide it", inteface.Name, f, t.Name)
- }
- }
-
- t.Interfaces[i] = inteface
- }
- default:
- continue
- }
- }
-
- for _, obj := range s.Objects {
- obj.Interfaces = make([]*types.InterfaceTypeDefinition, len(obj.InterfaceNames))
- if err := resolveDirectives(s, obj.Directives, "OBJECT"); err != nil {
- return err
- }
- for _, field := range obj.Fields {
- if err := resolveDirectives(s, field.Directives, "FIELD_DEFINITION"); err != nil {
- return err
- }
- }
- for i, intfName := range obj.InterfaceNames {
- t, ok := s.Types[intfName]
- if !ok {
- return errors.Errorf("interface %q not found", intfName)
- }
- intf, ok := t.(*types.InterfaceTypeDefinition)
- if !ok {
- return errors.Errorf("type %q is not an interface", intfName)
- }
- for _, f := range intf.Fields.Names() {
- if obj.Fields.Get(f) == nil {
- return errors.Errorf("interface %q expects field %q but %q does not provide it", intfName, f, obj.Name)
- }
- }
- obj.Interfaces[i] = intf
- intf.PossibleTypes = append(intf.PossibleTypes, obj)
- }
- }
-
- for _, union := range s.Unions {
- if err := resolveDirectives(s, union.Directives, "UNION"); err != nil {
- return err
- }
- union.UnionMemberTypes = make([]*types.ObjectTypeDefinition, len(union.TypeNames))
- for i, name := range union.TypeNames {
- t, ok := s.Types[name]
- if !ok {
- return errors.Errorf("object type %q not found", name)
- }
- obj, ok := t.(*types.ObjectTypeDefinition)
- if !ok {
- return errors.Errorf("type %q is not an object", name)
- }
- union.UnionMemberTypes[i] = obj
- }
- }
-
- for _, enum := range s.Enums {
- if err := resolveDirectives(s, enum.Directives, "ENUM"); err != nil {
- return err
- }
- for _, value := range enum.EnumValuesDefinition {
- if err := resolveDirectives(s, value.Directives, "ENUM_VALUE"); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func ParseSchema(schemaString string, useStringDescriptions bool) (*types.Schema, error) {
- s := New()
- err := Parse(s, schemaString, useStringDescriptions)
- return s, err
-}
-
-func mergeExtensions(s *types.Schema) error {
- for _, ext := range s.Extensions {
- typ := s.Types[ext.Type.TypeName()]
- if typ == nil {
- return fmt.Errorf("trying to extend unknown type %q", ext.Type.TypeName())
- }
-
- if typ.Kind() != ext.Type.Kind() {
- return fmt.Errorf("trying to extend type %q with type %q", typ.Kind(), ext.Type.Kind())
- }
-
- switch og := typ.(type) {
- case *types.ObjectTypeDefinition:
- e := ext.Type.(*types.ObjectTypeDefinition)
-
- for _, field := range e.Fields {
- if og.Fields.Get(field.Name) != nil {
- return fmt.Errorf("extended field %q already exists", field.Name)
- }
- }
- og.Fields = append(og.Fields, e.Fields...)
-
- for _, en := range e.InterfaceNames {
- for _, on := range og.InterfaceNames {
- if on == en {
- return fmt.Errorf("interface %q implemented in the extension is already implemented in %q", on, og.Name)
- }
- }
- }
- og.InterfaceNames = append(og.InterfaceNames, e.InterfaceNames...)
-
- case *types.InputObject:
- e := ext.Type.(*types.InputObject)
-
- for _, field := range e.Values {
- if og.Values.Get(field.Name.Name) != nil {
- return fmt.Errorf("extended field %q already exists", field.Name)
- }
- }
- og.Values = append(og.Values, e.Values...)
-
- case *types.InterfaceTypeDefinition:
- e := ext.Type.(*types.InterfaceTypeDefinition)
-
- for _, field := range e.Fields {
- if og.Fields.Get(field.Name) != nil {
- return fmt.Errorf("extended field %s already exists", field.Name)
- }
- }
- og.Fields = append(og.Fields, e.Fields...)
-
- case *types.Union:
- e := ext.Type.(*types.Union)
-
- for _, en := range e.TypeNames {
- for _, on := range og.TypeNames {
- if on == en {
- return fmt.Errorf("union type %q already declared in %q", on, og.Name)
- }
- }
- }
- og.TypeNames = append(og.TypeNames, e.TypeNames...)
-
- case *types.EnumTypeDefinition:
- e := ext.Type.(*types.EnumTypeDefinition)
-
- for _, en := range e.EnumValuesDefinition {
- for _, on := range og.EnumValuesDefinition {
- if on.EnumValue == en.EnumValue {
- return fmt.Errorf("enum value %q already declared in %q", on.EnumValue, og.Name)
- }
- }
- }
- og.EnumValuesDefinition = append(og.EnumValuesDefinition, e.EnumValuesDefinition...)
- default:
- return fmt.Errorf(`unexpected %q, expecting "schema", "type", "enum", "interface", "union" or "input"`, og.TypeName())
- }
- }
-
- return nil
-}
-
-func resolveNamedType(s *types.Schema, t types.NamedType) error {
- switch t := t.(type) {
- case *types.ObjectTypeDefinition:
- for _, f := range t.Fields {
- if err := resolveField(s, f); err != nil {
- return err
- }
- }
- case *types.InterfaceTypeDefinition:
- for _, f := range t.Fields {
- if err := resolveField(s, f); err != nil {
- return err
- }
- }
- case *types.InputObject:
- if err := resolveInputObject(s, t.Values); err != nil {
- return err
- }
- }
- return nil
-}
-
-func resolveField(s *types.Schema, f *types.FieldDefinition) error {
- t, err := common.ResolveType(f.Type, s.Resolve)
- if err != nil {
- return err
- }
- f.Type = t
- if err := resolveDirectives(s, f.Directives, "FIELD_DEFINITION"); err != nil {
- return err
- }
- return resolveInputObject(s, f.Arguments)
-}
-
-func resolveDirectives(s *types.Schema, directives types.DirectiveList, loc string) error {
- for _, d := range directives {
- dirName := d.Name.Name
- dd, ok := s.Directives[dirName]
- if !ok {
- return errors.Errorf("directive %q not found", dirName)
- }
- validLoc := false
- for _, l := range dd.Locations {
- if l == loc {
- validLoc = true
- break
- }
- }
- if !validLoc {
- return errors.Errorf("invalid location %q for directive %q (must be one of %v)", loc, dirName, dd.Locations)
- }
- for _, arg := range d.Arguments {
- if dd.Arguments.Get(arg.Name.Name) == nil {
- return errors.Errorf("invalid argument %q for directive %q", arg.Name.Name, dirName)
- }
- }
- for _, arg := range dd.Arguments {
- if _, ok := d.Arguments.Get(arg.Name.Name); !ok {
- d.Arguments = append(d.Arguments, &types.Argument{Name: arg.Name, Value: arg.Default})
- }
- }
- }
- return nil
-}
-
-func resolveInputObject(s *types.Schema, values types.ArgumentsDefinition) error {
- for _, v := range values {
- t, err := common.ResolveType(v.Type, s.Resolve)
- if err != nil {
- return err
- }
- v.Type = t
- }
- return nil
-}
-
-func parseSchema(s *types.Schema, l *common.Lexer) {
- l.ConsumeWhitespace()
-
- for l.Peek() != scanner.EOF {
- desc := l.DescComment()
- switch x := l.ConsumeIdent(); x {
-
- case "schema":
- l.ConsumeToken('{')
- for l.Peek() != '}' {
-
- name := l.ConsumeIdent()
- l.ConsumeToken(':')
- typ := l.ConsumeIdent()
- s.EntryPointNames[name] = typ
- }
- l.ConsumeToken('}')
-
- case "type":
- obj := parseObjectDef(l)
- obj.Desc = desc
- s.Types[obj.Name] = obj
- s.Objects = append(s.Objects, obj)
-
- case "interface":
- iface := parseInterfaceDef(l)
- iface.Desc = desc
- s.Types[iface.Name] = iface
-
- case "union":
- union := parseUnionDef(l)
- union.Desc = desc
- s.Types[union.Name] = union
- s.Unions = append(s.Unions, union)
-
- case "enum":
- enum := parseEnumDef(l)
- enum.Desc = desc
- s.Types[enum.Name] = enum
- s.Enums = append(s.Enums, enum)
-
- case "input":
- input := parseInputDef(l)
- input.Desc = desc
- s.Types[input.Name] = input
-
- case "scalar":
- loc := l.Location()
- name := l.ConsumeIdent()
- directives := common.ParseDirectives(l)
- s.Types[name] = &types.ScalarTypeDefinition{Name: name, Desc: desc, Directives: directives, Loc: loc}
-
- case "directive":
- directive := parseDirectiveDef(l)
- directive.Desc = desc
- s.Directives[directive.Name] = directive
-
- case "extend":
- parseExtension(s, l)
-
- default:
- // TODO: Add support for type extensions.
- l.SyntaxError(fmt.Sprintf(`unexpected %q, expecting "schema", "type", "enum", "interface", "union", "input", "scalar" or "directive"`, x))
- }
- }
-}
-
-func parseObjectDef(l *common.Lexer) *types.ObjectTypeDefinition {
- object := &types.ObjectTypeDefinition{Loc: l.Location(), Name: l.ConsumeIdent()}
-
- for {
- if l.Peek() == '{' {
- break
- }
-
- if l.Peek() == '@' {
- object.Directives = common.ParseDirectives(l)
- continue
- }
-
- if l.Peek() == scanner.Ident {
- l.ConsumeKeyword("implements")
-
- for l.Peek() != '{' && l.Peek() != '@' {
- if l.Peek() == '&' {
- l.ConsumeToken('&')
- }
-
- object.InterfaceNames = append(object.InterfaceNames, l.ConsumeIdent())
- }
- continue
- }
-
- }
- l.ConsumeToken('{')
- object.Fields = parseFieldsDef(l)
- l.ConsumeToken('}')
-
- return object
-
-}
-
-func parseInterfaceDef(l *common.Lexer) *types.InterfaceTypeDefinition {
- i := &types.InterfaceTypeDefinition{Loc: l.Location(), Name: l.ConsumeIdent()}
-
- if l.Peek() == scanner.Ident {
- l.ConsumeKeyword("implements")
- i.Interfaces = append(i.Interfaces, &types.InterfaceTypeDefinition{Name: l.ConsumeIdent()})
-
- for l.Peek() == '&' {
- l.ConsumeToken('&')
- i.Interfaces = append(i.Interfaces, &types.InterfaceTypeDefinition{Name: l.ConsumeIdent()})
- }
- }
-
- i.Directives = common.ParseDirectives(l)
-
- l.ConsumeToken('{')
- i.Fields = parseFieldsDef(l)
- l.ConsumeToken('}')
-
- return i
-}
-
-func parseUnionDef(l *common.Lexer) *types.Union {
- union := &types.Union{Loc: l.Location(), Name: l.ConsumeIdent()}
-
- union.Directives = common.ParseDirectives(l)
- l.ConsumeToken('=')
- union.TypeNames = []string{l.ConsumeIdent()}
- for l.Peek() == '|' {
- l.ConsumeToken('|')
- union.TypeNames = append(union.TypeNames, l.ConsumeIdent())
- }
-
- return union
-}
-
-func parseInputDef(l *common.Lexer) *types.InputObject {
- i := &types.InputObject{}
- i.Loc = l.Location()
- i.Name = l.ConsumeIdent()
- i.Directives = common.ParseDirectives(l)
- l.ConsumeToken('{')
- for l.Peek() != '}' {
- i.Values = append(i.Values, common.ParseInputValue(l))
- }
- l.ConsumeToken('}')
- return i
-}
-
-func parseEnumDef(l *common.Lexer) *types.EnumTypeDefinition {
- enum := &types.EnumTypeDefinition{Loc: l.Location(), Name: l.ConsumeIdent()}
-
- enum.Directives = common.ParseDirectives(l)
- l.ConsumeToken('{')
- for l.Peek() != '}' {
- v := &types.EnumValueDefinition{
- Desc: l.DescComment(),
- Loc: l.Location(),
- EnumValue: l.ConsumeIdent(),
- Directives: common.ParseDirectives(l),
- }
-
- enum.EnumValuesDefinition = append(enum.EnumValuesDefinition, v)
- }
- l.ConsumeToken('}')
- return enum
-}
-func parseDirectiveDef(l *common.Lexer) *types.DirectiveDefinition {
- l.ConsumeToken('@')
- loc := l.Location()
- d := &types.DirectiveDefinition{Name: l.ConsumeIdent(), Loc: loc}
-
- if l.Peek() == '(' {
- l.ConsumeToken('(')
- for l.Peek() != ')' {
- v := common.ParseInputValue(l)
- d.Arguments = append(d.Arguments, v)
- }
- l.ConsumeToken(')')
- }
-
- l.ConsumeKeyword("on")
-
- for {
- loc := l.ConsumeIdent()
- d.Locations = append(d.Locations, loc)
- if l.Peek() != '|' {
- break
- }
- l.ConsumeToken('|')
- }
- return d
-}
-
-func parseExtension(s *types.Schema, l *common.Lexer) {
- loc := l.Location()
- switch x := l.ConsumeIdent(); x {
- case "schema":
- l.ConsumeToken('{')
- for l.Peek() != '}' {
- name := l.ConsumeIdent()
- l.ConsumeToken(':')
- typ := l.ConsumeIdent()
- s.EntryPointNames[name] = typ
- }
- l.ConsumeToken('}')
-
- case "type":
- obj := parseObjectDef(l)
- s.Extensions = append(s.Extensions, &types.Extension{Type: obj, Loc: loc})
-
- case "interface":
- iface := parseInterfaceDef(l)
- s.Extensions = append(s.Extensions, &types.Extension{Type: iface, Loc: loc})
-
- case "union":
- union := parseUnionDef(l)
- s.Extensions = append(s.Extensions, &types.Extension{Type: union, Loc: loc})
-
- case "enum":
- enum := parseEnumDef(l)
- s.Extensions = append(s.Extensions, &types.Extension{Type: enum, Loc: loc})
-
- case "input":
- input := parseInputDef(l)
- s.Extensions = append(s.Extensions, &types.Extension{Type: input, Loc: loc})
-
- default:
- // TODO: Add ScalarTypeDefinition when adding directives
- l.SyntaxError(fmt.Sprintf(`unexpected %q, expecting "schema", "type", "enum", "interface", "union" or "input"`, x))
- }
-}
-
-func parseFieldsDef(l *common.Lexer) types.FieldsDefinition {
- var fields types.FieldsDefinition
- for l.Peek() != '}' {
- f := &types.FieldDefinition{}
- f.Desc = l.DescComment()
- f.Loc = l.Location()
- f.Name = l.ConsumeIdent()
- if l.Peek() == '(' {
- l.ConsumeToken('(')
- for l.Peek() != ')' {
- f.Arguments = append(f.Arguments, common.ParseInputValue(l))
- }
- l.ConsumeToken(')')
- }
- l.ConsumeToken(':')
- f.Type = common.ParseType(l)
- f.Directives = common.ParseDirectives(l)
- fields = append(fields, f)
- }
- return fields
-}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/validation/suggestion.go b/vendor/github.com/graph-gophers/graphql-go/internal/validation/suggestion.go
deleted file mode 100644
index 9702b5f5..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/internal/validation/suggestion.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package validation
-
-import (
- "fmt"
- "sort"
- "strconv"
- "strings"
-)
-
-func makeSuggestion(prefix string, options []string, input string) string {
- var selected []string
- distances := make(map[string]int)
- for _, opt := range options {
- distance := levenshteinDistance(input, opt)
- threshold := max(len(input)/2, max(len(opt)/2, 1))
- if distance < threshold {
- selected = append(selected, opt)
- distances[opt] = distance
- }
- }
-
- if len(selected) == 0 {
- return ""
- }
- sort.Slice(selected, func(i, j int) bool {
- return distances[selected[i]] < distances[selected[j]]
- })
-
- parts := make([]string, len(selected))
- for i, opt := range selected {
- parts[i] = strconv.Quote(opt)
- }
- if len(parts) > 1 {
- parts[len(parts)-1] = "or " + parts[len(parts)-1]
- }
- return fmt.Sprintf(" %s %s?", prefix, strings.Join(parts, ", "))
-}
-
-func levenshteinDistance(s1, s2 string) int {
- column := make([]int, len(s1)+1)
- for y := range s1 {
- column[y+1] = y + 1
- }
- for x, rx := range s2 {
- column[0] = x + 1
- lastdiag := x
- for y, ry := range s1 {
- olddiag := column[y+1]
- if rx != ry {
- lastdiag++
- }
- column[y+1] = min(column[y+1]+1, min(column[y]+1, lastdiag))
- lastdiag = olddiag
- }
- }
- return column[len(s1)]
-}
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
-
-func max(a, b int) int {
- if a > b {
- return a
- }
- return b
-}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/validation/validation.go b/vendor/github.com/graph-gophers/graphql-go/internal/validation/validation.go
deleted file mode 100644
index e3672638..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/internal/validation/validation.go
+++ /dev/null
@@ -1,980 +0,0 @@
-package validation
-
-import (
- "fmt"
- "math"
- "reflect"
- "strconv"
- "strings"
- "text/scanner"
-
- "github.com/graph-gophers/graphql-go/errors"
- "github.com/graph-gophers/graphql-go/internal/common"
- "github.com/graph-gophers/graphql-go/internal/query"
- "github.com/graph-gophers/graphql-go/types"
-)
-
-type varSet map[*types.InputValueDefinition]struct{}
-
-type selectionPair struct{ a, b types.Selection }
-
-type nameSet map[string]errors.Location
-
-type fieldInfo struct {
- sf *types.FieldDefinition
- parent types.NamedType
-}
-
-type context struct {
- schema *types.Schema
- doc *types.ExecutableDefinition
- errs []*errors.QueryError
- opErrs map[*types.OperationDefinition][]*errors.QueryError
- usedVars map[*types.OperationDefinition]varSet
- fieldMap map[*types.Field]fieldInfo
- overlapValidated map[selectionPair]struct{}
- maxDepth int
-}
-
-func (c *context) addErr(loc errors.Location, rule string, format string, a ...interface{}) {
- c.addErrMultiLoc([]errors.Location{loc}, rule, format, a...)
-}
-
-func (c *context) addErrMultiLoc(locs []errors.Location, rule string, format string, a ...interface{}) {
- c.errs = append(c.errs, &errors.QueryError{
- Message: fmt.Sprintf(format, a...),
- Locations: locs,
- Rule: rule,
- })
-}
-
-type opContext struct {
- *context
- ops []*types.OperationDefinition
-}
-
-func newContext(s *types.Schema, doc *types.ExecutableDefinition, maxDepth int) *context {
- return &context{
- schema: s,
- doc: doc,
- opErrs: make(map[*types.OperationDefinition][]*errors.QueryError),
- usedVars: make(map[*types.OperationDefinition]varSet),
- fieldMap: make(map[*types.Field]fieldInfo),
- overlapValidated: make(map[selectionPair]struct{}),
- maxDepth: maxDepth,
- }
-}
-
-func Validate(s *types.Schema, doc *types.ExecutableDefinition, variables map[string]interface{}, maxDepth int) []*errors.QueryError {
- c := newContext(s, doc, maxDepth)
-
- opNames := make(nameSet)
- fragUsedBy := make(map[*types.FragmentDefinition][]*types.OperationDefinition)
- for _, op := range doc.Operations {
- c.usedVars[op] = make(varSet)
- opc := &opContext{c, []*types.OperationDefinition{op}}
-
- // Check if max depth is exceeded, if it's set. If max depth is exceeded,
- // don't continue to validate the document and exit early.
- if validateMaxDepth(opc, op.Selections, nil, 1) {
- return c.errs
- }
-
- if op.Name.Name == "" && len(doc.Operations) != 1 {
- c.addErr(op.Loc, "LoneAnonymousOperation", "This anonymous operation must be the only defined operation.")
- }
- if op.Name.Name != "" {
- validateName(c, opNames, op.Name, "UniqueOperationNames", "operation")
- }
-
- validateDirectives(opc, string(op.Type), op.Directives)
-
- varNames := make(nameSet)
- for _, v := range op.Vars {
- validateName(c, varNames, v.Name, "UniqueVariableNames", "variable")
-
- t := resolveType(c, v.Type)
- if !canBeInput(t) {
- c.addErr(v.TypeLoc, "VariablesAreInputTypes", "Variable %q cannot be non-input type %q.", "$"+v.Name.Name, t)
- }
- validateValue(opc, v, variables[v.Name.Name], t)
-
- if v.Default != nil {
- validateLiteral(opc, v.Default)
-
- if t != nil {
- if nn, ok := t.(*types.NonNull); ok {
- c.addErr(v.Default.Location(), "DefaultValuesOfCorrectType", "Variable %q of type %q is required and will not use the default value. Perhaps you meant to use type %q.", "$"+v.Name.Name, t, nn.OfType)
- }
-
- if ok, reason := validateValueType(opc, v.Default, t); !ok {
- c.addErr(v.Default.Location(), "DefaultValuesOfCorrectType", "Variable %q of type %q has invalid default value %s.\n%s", "$"+v.Name.Name, t, v.Default, reason)
- }
- }
- }
- }
-
- var entryPoint types.NamedType
- switch op.Type {
- case query.Query:
- entryPoint = s.EntryPoints["query"]
- case query.Mutation:
- entryPoint = s.EntryPoints["mutation"]
- case query.Subscription:
- entryPoint = s.EntryPoints["subscription"]
- default:
- panic("unreachable")
- }
-
- validateSelectionSet(opc, op.Selections, entryPoint)
-
- fragUsed := make(map[*types.FragmentDefinition]struct{})
- markUsedFragments(c, op.Selections, fragUsed)
- for frag := range fragUsed {
- fragUsedBy[frag] = append(fragUsedBy[frag], op)
- }
- }
-
- fragNames := make(nameSet)
- fragVisited := make(map[*types.FragmentDefinition]struct{})
- for _, frag := range doc.Fragments {
- opc := &opContext{c, fragUsedBy[frag]}
-
- validateName(c, fragNames, frag.Name, "UniqueFragmentNames", "fragment")
- validateDirectives(opc, "FRAGMENT_DEFINITION", frag.Directives)
-
- t := unwrapType(resolveType(c, &frag.On))
- // continue even if t is nil
- if t != nil && !canBeFragment(t) {
- c.addErr(frag.On.Loc, "FragmentsOnCompositeTypes", "Fragment %q cannot condition on non composite type %q.", frag.Name.Name, t)
- continue
- }
-
- validateSelectionSet(opc, frag.Selections, t)
-
- if _, ok := fragVisited[frag]; !ok {
- detectFragmentCycle(c, frag.Selections, fragVisited, nil, map[string]int{frag.Name.Name: 0})
- }
- }
-
- for _, frag := range doc.Fragments {
- if len(fragUsedBy[frag]) == 0 {
- c.addErr(frag.Loc, "NoUnusedFragments", "Fragment %q is never used.", frag.Name.Name)
- }
- }
-
- for _, op := range doc.Operations {
- c.errs = append(c.errs, c.opErrs[op]...)
-
- opUsedVars := c.usedVars[op]
- for _, v := range op.Vars {
- if _, ok := opUsedVars[v]; !ok {
- opSuffix := ""
- if op.Name.Name != "" {
- opSuffix = fmt.Sprintf(" in operation %q", op.Name.Name)
- }
- c.addErr(v.Loc, "NoUnusedVariables", "Variable %q is never used%s.", "$"+v.Name.Name, opSuffix)
- }
- }
- }
-
- return c.errs
-}
-
-func validateValue(c *opContext, v *types.InputValueDefinition, val interface{}, t types.Type) {
- switch t := t.(type) {
- case *types.NonNull:
- if val == nil {
- c.addErr(v.Loc, "VariablesOfCorrectType", "Variable \"%s\" has invalid value null.\nExpected type \"%s\", found null.", v.Name.Name, t)
- return
- }
- validateValue(c, v, val, t.OfType)
- case *types.List:
- if val == nil {
- return
- }
- vv, ok := val.([]interface{})
- if !ok {
- // Input coercion rules allow single items without wrapping array
- validateValue(c, v, val, t.OfType)
- return
- }
- for _, elem := range vv {
- validateValue(c, v, elem, t.OfType)
- }
- case *types.EnumTypeDefinition:
- if val == nil {
- return
- }
- e, ok := val.(string)
- if !ok {
- c.addErr(v.Loc, "VariablesOfCorrectType", "Variable \"%s\" has invalid type %T.\nExpected type \"%s\", found %v.", v.Name.Name, val, t, val)
- return
- }
- for _, option := range t.EnumValuesDefinition {
- if option.EnumValue == e {
- return
- }
- }
- c.addErr(v.Loc, "VariablesOfCorrectType", "Variable \"%s\" has invalid value %s.\nExpected type \"%s\", found %s.", v.Name.Name, e, t, e)
- case *types.InputObject:
- if val == nil {
- return
- }
- in, ok := val.(map[string]interface{})
- if !ok {
- c.addErr(v.Loc, "VariablesOfCorrectType", "Variable \"%s\" has invalid type %T.\nExpected type \"%s\", found %s.", v.Name.Name, val, t, val)
- return
- }
- for _, f := range t.Values {
- fieldVal := in[f.Name.Name]
- validateValue(c, f, fieldVal, f.Type)
- }
- }
-}
-
-// validates that the query doesn't go deeper than maxDepth (if set). Returns whether
-// the max depth was exceeded, so callers can stop validating early and avoid excessive recursion.
-//
-// The visited map is necessary to ensure that max depth validation does not get stuck in cyclical
-// fragment spreads.
-func validateMaxDepth(c *opContext, sels []types.Selection, visited map[*types.FragmentDefinition]struct{}, depth int) bool {
- // maxDepth checking is turned off when maxDepth is 0
- if c.maxDepth == 0 {
- return false
- }
-
- exceededMaxDepth := false
- if visited == nil {
- visited = map[*types.FragmentDefinition]struct{}{}
- }
-
- for _, sel := range sels {
- switch sel := sel.(type) {
- case *types.Field:
- if depth > c.maxDepth {
- exceededMaxDepth = true
- c.addErr(sel.Alias.Loc, "MaxDepthExceeded", "Field %q has depth %d that exceeds max depth %d", sel.Name.Name, depth, c.maxDepth)
- continue
- }
- exceededMaxDepth = exceededMaxDepth || validateMaxDepth(c, sel.SelectionSet, visited, depth+1)
-
- case *types.InlineFragment:
- // Depth is not checked because inline fragments resolve to other fields which are checked.
- // Depth is not incremented because inline fragments have the same depth as neighboring fields
- exceededMaxDepth = exceededMaxDepth || validateMaxDepth(c, sel.Selections, visited, depth)
- case *types.FragmentSpread:
- // Depth is not checked because fragments resolve to other fields which are checked.
- frag := c.doc.Fragments.Get(sel.Name.Name)
- if frag == nil {
- // In case of unknown fragment (invalid request), ignore max depth evaluation
- c.addErr(sel.Loc, "MaxDepthEvaluationError", "Unknown fragment %q. Unable to evaluate depth.", sel.Name.Name)
- continue
- }
-
- if _, ok := visited[frag]; ok {
- // we've already seen this fragment, don't check depth again.
- continue
- }
- visited[frag] = struct{}{}
-
- // Depth is not incremented because fragments have the same depth as surrounding fields
- exceededMaxDepth = exceededMaxDepth || validateMaxDepth(c, frag.Selections, visited, depth)
- }
- }
-
- return exceededMaxDepth
-}
-
-func validateSelectionSet(c *opContext, sels []types.Selection, t types.NamedType) {
- for _, sel := range sels {
- validateSelection(c, sel, t)
- }
-
- for i, a := range sels {
- for _, b := range sels[i+1:] {
- c.validateOverlap(a, b, nil, nil)
- }
- }
-}
-
-func validateSelection(c *opContext, sel types.Selection, t types.NamedType) {
- switch sel := sel.(type) {
- case *types.Field:
- validateDirectives(c, "FIELD", sel.Directives)
-
- fieldName := sel.Name.Name
- var f *types.FieldDefinition
- switch fieldName {
- case "__typename":
- f = &types.FieldDefinition{
- Name: "__typename",
- Type: c.schema.Types["String"],
- }
- case "__schema":
- f = &types.FieldDefinition{
- Name: "__schema",
- Type: c.schema.Types["__Schema"],
- }
- case "__type":
- f = &types.FieldDefinition{
- Name: "__type",
- Arguments: types.ArgumentsDefinition{
- &types.InputValueDefinition{
- Name: types.Ident{Name: "name"},
- Type: &types.NonNull{OfType: c.schema.Types["String"]},
- },
- },
- Type: c.schema.Types["__Type"],
- }
- default:
- f = fields(t).Get(fieldName)
- if f == nil && t != nil {
- suggestion := makeSuggestion("Did you mean", fields(t).Names(), fieldName)
- c.addErr(sel.Alias.Loc, "FieldsOnCorrectType", "Cannot query field %q on type %q.%s", fieldName, t, suggestion)
- }
- }
- c.fieldMap[sel] = fieldInfo{sf: f, parent: t}
-
- validateArgumentLiterals(c, sel.Arguments)
- if f != nil {
- validateArgumentTypes(c, sel.Arguments, f.Arguments, sel.Alias.Loc,
- func() string { return fmt.Sprintf("field %q of type %q", fieldName, t) },
- func() string { return fmt.Sprintf("Field %q", fieldName) },
- )
- }
-
- var ft types.Type
- if f != nil {
- ft = f.Type
- sf := hasSubfields(ft)
- if sf && sel.SelectionSet == nil {
- c.addErr(sel.Alias.Loc, "ScalarLeafs", "Field %q of type %q must have a selection of subfields. Did you mean \"%s { ... }\"?", fieldName, ft, fieldName)
- }
- if !sf && sel.SelectionSet != nil {
- c.addErr(sel.SelectionSetLoc, "ScalarLeafs", "Field %q must not have a selection since type %q has no subfields.", fieldName, ft)
- }
- }
- if sel.SelectionSet != nil {
- validateSelectionSet(c, sel.SelectionSet, unwrapType(ft))
- }
-
- case *types.InlineFragment:
- validateDirectives(c, "INLINE_FRAGMENT", sel.Directives)
- if sel.On.Name != "" {
- fragTyp := unwrapType(resolveType(c.context, &sel.On))
- if fragTyp != nil && !compatible(t, fragTyp) {
- c.addErr(sel.Loc, "PossibleFragmentSpreads", "Fragment cannot be spread here as objects of type %q can never be of type %q.", t, fragTyp)
- }
- t = fragTyp
- // continue even if t is nil
- }
- if t != nil && !canBeFragment(t) {
- c.addErr(sel.On.Loc, "FragmentsOnCompositeTypes", "Fragment cannot condition on non composite type %q.", t)
- return
- }
- validateSelectionSet(c, sel.Selections, unwrapType(t))
-
- case *types.FragmentSpread:
- validateDirectives(c, "FRAGMENT_SPREAD", sel.Directives)
- frag := c.doc.Fragments.Get(sel.Name.Name)
- if frag == nil {
- c.addErr(sel.Name.Loc, "KnownFragmentNames", "Unknown fragment %q.", sel.Name.Name)
- return
- }
- fragTyp := c.schema.Types[frag.On.Name]
- if !compatible(t, fragTyp) {
- c.addErr(sel.Loc, "PossibleFragmentSpreads", "Fragment %q cannot be spread here as objects of type %q can never be of type %q.", frag.Name.Name, t, fragTyp)
- }
-
- default:
- panic("unreachable")
- }
-}
-
-func compatible(a, b types.Type) bool {
- for _, pta := range possibleTypes(a) {
- for _, ptb := range possibleTypes(b) {
- if pta == ptb {
- return true
- }
- }
- }
- return false
-}
-
-func possibleTypes(t types.Type) []*types.ObjectTypeDefinition {
- switch t := t.(type) {
- case *types.ObjectTypeDefinition:
- return []*types.ObjectTypeDefinition{t}
- case *types.InterfaceTypeDefinition:
- return t.PossibleTypes
- case *types.Union:
- return t.UnionMemberTypes
- default:
- return nil
- }
-}
-
-func markUsedFragments(c *context, sels []types.Selection, fragUsed map[*types.FragmentDefinition]struct{}) {
- for _, sel := range sels {
- switch sel := sel.(type) {
- case *types.Field:
- if sel.SelectionSet != nil {
- markUsedFragments(c, sel.SelectionSet, fragUsed)
- }
-
- case *types.InlineFragment:
- markUsedFragments(c, sel.Selections, fragUsed)
-
- case *types.FragmentSpread:
- frag := c.doc.Fragments.Get(sel.Name.Name)
- if frag == nil {
- return
- }
-
- if _, ok := fragUsed[frag]; ok {
- continue
- }
-
- fragUsed[frag] = struct{}{}
- markUsedFragments(c, frag.Selections, fragUsed)
-
- default:
- panic("unreachable")
- }
- }
-}
-
-func detectFragmentCycle(c *context, sels []types.Selection, fragVisited map[*types.FragmentDefinition]struct{}, spreadPath []*types.FragmentSpread, spreadPathIndex map[string]int) {
- for _, sel := range sels {
- detectFragmentCycleSel(c, sel, fragVisited, spreadPath, spreadPathIndex)
- }
-}
-
-func detectFragmentCycleSel(c *context, sel types.Selection, fragVisited map[*types.FragmentDefinition]struct{}, spreadPath []*types.FragmentSpread, spreadPathIndex map[string]int) {
- switch sel := sel.(type) {
- case *types.Field:
- if sel.SelectionSet != nil {
- detectFragmentCycle(c, sel.SelectionSet, fragVisited, spreadPath, spreadPathIndex)
- }
-
- case *types.InlineFragment:
- detectFragmentCycle(c, sel.Selections, fragVisited, spreadPath, spreadPathIndex)
-
- case *types.FragmentSpread:
- frag := c.doc.Fragments.Get(sel.Name.Name)
- if frag == nil {
- return
- }
-
- spreadPath = append(spreadPath, sel)
- if i, ok := spreadPathIndex[frag.Name.Name]; ok {
- cyclePath := spreadPath[i:]
- via := ""
- if len(cyclePath) > 1 {
- names := make([]string, len(cyclePath)-1)
- for i, frag := range cyclePath[:len(cyclePath)-1] {
- names[i] = frag.Name.Name
- }
- via = " via " + strings.Join(names, ", ")
- }
-
- locs := make([]errors.Location, len(cyclePath))
- for i, frag := range cyclePath {
- locs[i] = frag.Loc
- }
- c.addErrMultiLoc(locs, "NoFragmentCycles", "Cannot spread fragment %q within itself%s.", frag.Name.Name, via)
- return
- }
-
- if _, ok := fragVisited[frag]; ok {
- return
- }
- fragVisited[frag] = struct{}{}
-
- spreadPathIndex[frag.Name.Name] = len(spreadPath)
- detectFragmentCycle(c, frag.Selections, fragVisited, spreadPath, spreadPathIndex)
- delete(spreadPathIndex, frag.Name.Name)
-
- default:
- panic("unreachable")
- }
-}
-
-func (c *context) validateOverlap(a, b types.Selection, reasons *[]string, locs *[]errors.Location) {
- if a == b {
- return
- }
-
- if _, ok := c.overlapValidated[selectionPair{a, b}]; ok {
- return
- }
- c.overlapValidated[selectionPair{a, b}] = struct{}{}
- c.overlapValidated[selectionPair{b, a}] = struct{}{}
-
- switch a := a.(type) {
- case *types.Field:
- switch b := b.(type) {
- case *types.Field:
- if b.Alias.Loc.Before(a.Alias.Loc) {
- a, b = b, a
- }
- if reasons2, locs2 := c.validateFieldOverlap(a, b); len(reasons2) != 0 {
- locs2 = append(locs2, a.Alias.Loc, b.Alias.Loc)
- if reasons == nil {
- c.addErrMultiLoc(locs2, "OverlappingFieldsCanBeMerged", "Fields %q conflict because %s. Use different aliases on the fields to fetch both if this was intentional.", a.Alias.Name, strings.Join(reasons2, " and "))
- return
- }
- for _, r := range reasons2 {
- *reasons = append(*reasons, fmt.Sprintf("subfields %q conflict because %s", a.Alias.Name, r))
- }
- *locs = append(*locs, locs2...)
- }
-
- case *types.InlineFragment:
- for _, sel := range b.Selections {
- c.validateOverlap(a, sel, reasons, locs)
- }
-
- case *types.FragmentSpread:
- if frag := c.doc.Fragments.Get(b.Name.Name); frag != nil {
- for _, sel := range frag.Selections {
- c.validateOverlap(a, sel, reasons, locs)
- }
- }
-
- default:
- panic("unreachable")
- }
-
- case *types.InlineFragment:
- for _, sel := range a.Selections {
- c.validateOverlap(sel, b, reasons, locs)
- }
-
- case *types.FragmentSpread:
- if frag := c.doc.Fragments.Get(a.Name.Name); frag != nil {
- for _, sel := range frag.Selections {
- c.validateOverlap(sel, b, reasons, locs)
- }
- }
-
- default:
- panic("unreachable")
- }
-}
-
-func (c *context) validateFieldOverlap(a, b *types.Field) ([]string, []errors.Location) {
- if a.Alias.Name != b.Alias.Name {
- return nil, nil
- }
-
- if asf := c.fieldMap[a].sf; asf != nil {
- if bsf := c.fieldMap[b].sf; bsf != nil {
- if !typesCompatible(asf.Type, bsf.Type) {
- return []string{fmt.Sprintf("they return conflicting types %s and %s", asf.Type, bsf.Type)}, nil
- }
- }
- }
-
- at := c.fieldMap[a].parent
- bt := c.fieldMap[b].parent
- if at == nil || bt == nil || at == bt {
- if a.Name.Name != b.Name.Name {
- return []string{fmt.Sprintf("%s and %s are different fields", a.Name.Name, b.Name.Name)}, nil
- }
-
- if argumentsConflict(a.Arguments, b.Arguments) {
- return []string{"they have differing arguments"}, nil
- }
- }
-
- var reasons []string
- var locs []errors.Location
- for _, a2 := range a.SelectionSet {
- for _, b2 := range b.SelectionSet {
- c.validateOverlap(a2, b2, &reasons, &locs)
- }
- }
- return reasons, locs
-}
-
-func argumentsConflict(a, b types.ArgumentList) bool {
- if len(a) != len(b) {
- return true
- }
- for _, argA := range a {
- valB, ok := b.Get(argA.Name.Name)
- if !ok || !reflect.DeepEqual(argA.Value.Deserialize(nil), valB.Deserialize(nil)) {
- return true
- }
- }
- return false
-}
-
-func fields(t types.Type) types.FieldsDefinition {
- switch t := t.(type) {
- case *types.ObjectTypeDefinition:
- return t.Fields
- case *types.InterfaceTypeDefinition:
- return t.Fields
- default:
- return nil
- }
-}
-
-func unwrapType(t types.Type) types.NamedType {
- if t == nil {
- return nil
- }
- for {
- switch t2 := t.(type) {
- case types.NamedType:
- return t2
- case *types.List:
- t = t2.OfType
- case *types.NonNull:
- t = t2.OfType
- default:
- panic("unreachable")
- }
- }
-}
-
-func resolveType(c *context, t types.Type) types.Type {
- t2, err := common.ResolveType(t, c.schema.Resolve)
- if err != nil {
- c.errs = append(c.errs, err)
- }
- return t2
-}
-
-func validateDirectives(c *opContext, loc string, directives types.DirectiveList) {
- directiveNames := make(nameSet)
- for _, d := range directives {
- dirName := d.Name.Name
- validateNameCustomMsg(c.context, directiveNames, d.Name, "UniqueDirectivesPerLocation", func() string {
- return fmt.Sprintf("The directive %q can only be used once at this location.", dirName)
- })
-
- validateArgumentLiterals(c, d.Arguments)
-
- dd, ok := c.schema.Directives[dirName]
- if !ok {
- c.addErr(d.Name.Loc, "KnownDirectives", "Unknown directive %q.", dirName)
- continue
- }
-
- locOK := false
- for _, allowedLoc := range dd.Locations {
- if loc == allowedLoc {
- locOK = true
- break
- }
- }
- if !locOK {
- c.addErr(d.Name.Loc, "KnownDirectives", "Directive %q may not be used on %s.", dirName, loc)
- }
-
- validateArgumentTypes(c, d.Arguments, dd.Arguments, d.Name.Loc,
- func() string { return fmt.Sprintf("directive %q", "@"+dirName) },
- func() string { return fmt.Sprintf("Directive %q", "@"+dirName) },
- )
- }
-}
-
-func validateName(c *context, set nameSet, name types.Ident, rule string, kind string) {
- validateNameCustomMsg(c, set, name, rule, func() string {
- return fmt.Sprintf("There can be only one %s named %q.", kind, name.Name)
- })
-}
-
-func validateNameCustomMsg(c *context, set nameSet, name types.Ident, rule string, msg func() string) {
- if loc, ok := set[name.Name]; ok {
- c.addErrMultiLoc([]errors.Location{loc, name.Loc}, rule, msg())
- return
- }
- set[name.Name] = name.Loc
-}
-
-func validateArgumentTypes(c *opContext, args types.ArgumentList, argDecls types.ArgumentsDefinition, loc errors.Location, owner1, owner2 func() string) {
- for _, selArg := range args {
- arg := argDecls.Get(selArg.Name.Name)
- if arg == nil {
- c.addErr(selArg.Name.Loc, "KnownArgumentNames", "Unknown argument %q on %s.", selArg.Name.Name, owner1())
- continue
- }
- value := selArg.Value
- if ok, reason := validateValueType(c, value, arg.Type); !ok {
- c.addErr(value.Location(), "ArgumentsOfCorrectType", "Argument %q has invalid value %s.\n%s", arg.Name.Name, value, reason)
- }
- }
- for _, decl := range argDecls {
- if _, ok := decl.Type.(*types.NonNull); ok {
- if _, ok := args.Get(decl.Name.Name); !ok {
- c.addErr(loc, "ProvidedNonNullArguments", "%s argument %q of type %q is required but not provided.", owner2(), decl.Name.Name, decl.Type)
- }
- }
- }
-}
-
-func validateArgumentLiterals(c *opContext, args types.ArgumentList) {
- argNames := make(nameSet)
- for _, arg := range args {
- validateName(c.context, argNames, arg.Name, "UniqueArgumentNames", "argument")
- validateLiteral(c, arg.Value)
- }
-}
-
-func validateLiteral(c *opContext, l types.Value) {
- switch l := l.(type) {
- case *types.ObjectValue:
- fieldNames := make(nameSet)
- for _, f := range l.Fields {
- validateName(c.context, fieldNames, f.Name, "UniqueInputFieldNames", "input field")
- validateLiteral(c, f.Value)
- }
- case *types.ListValue:
- for _, entry := range l.Values {
- validateLiteral(c, entry)
- }
- case *types.Variable:
- for _, op := range c.ops {
- v := op.Vars.Get(l.Name)
- if v == nil {
- byOp := ""
- if op.Name.Name != "" {
- byOp = fmt.Sprintf(" by operation %q", op.Name.Name)
- }
- c.opErrs[op] = append(c.opErrs[op], &errors.QueryError{
- Message: fmt.Sprintf("Variable %q is not defined%s.", "$"+l.Name, byOp),
- Locations: []errors.Location{l.Loc, op.Loc},
- Rule: "NoUndefinedVariables",
- })
- continue
- }
- validateValueType(c, l, resolveType(c.context, v.Type))
- c.usedVars[op][v] = struct{}{}
- }
- }
-}
-
-func validateValueType(c *opContext, v types.Value, t types.Type) (bool, string) {
- if v, ok := v.(*types.Variable); ok {
- for _, op := range c.ops {
- if v2 := op.Vars.Get(v.Name); v2 != nil {
- t2, err := common.ResolveType(v2.Type, c.schema.Resolve)
- if _, ok := t2.(*types.NonNull); !ok && v2.Default != nil {
- t2 = &types.NonNull{OfType: t2}
- }
- if err == nil && !typeCanBeUsedAs(t2, t) {
- c.addErrMultiLoc([]errors.Location{v2.Loc, v.Loc}, "VariablesInAllowedPosition", "Variable %q of type %q used in position expecting type %q.", "$"+v.Name, t2, t)
- }
- }
- }
- return true, ""
- }
-
- if nn, ok := t.(*types.NonNull); ok {
- if isNull(v) {
- return false, fmt.Sprintf("Expected %q, found null.", t)
- }
- t = nn.OfType
- }
- if isNull(v) {
- return true, ""
- }
-
- switch t := t.(type) {
- case *types.ScalarTypeDefinition, *types.EnumTypeDefinition:
- if lit, ok := v.(*types.PrimitiveValue); ok {
- if validateBasicLit(lit, t) {
- return true, ""
- }
- return false, fmt.Sprintf("Expected type %q, found %s.", t, v)
- }
- return true, ""
-
- case *types.List:
- list, ok := v.(*types.ListValue)
- if !ok {
- return validateValueType(c, v, t.OfType) // single value instead of list
- }
- for i, entry := range list.Values {
- if ok, reason := validateValueType(c, entry, t.OfType); !ok {
- return false, fmt.Sprintf("In element #%d: %s", i, reason)
- }
- }
- return true, ""
-
- case *types.InputObject:
- v, ok := v.(*types.ObjectValue)
- if !ok {
- return false, fmt.Sprintf("Expected %q, found not an object.", t)
- }
- for _, f := range v.Fields {
- name := f.Name.Name
- iv := t.Values.Get(name)
- if iv == nil {
- return false, fmt.Sprintf("In field %q: Unknown field.", name)
- }
- if ok, reason := validateValueType(c, f.Value, iv.Type); !ok {
- return false, fmt.Sprintf("In field %q: %s", name, reason)
- }
- }
- for _, iv := range t.Values {
- found := false
- for _, f := range v.Fields {
- if f.Name.Name == iv.Name.Name {
- found = true
- break
- }
- }
- if !found {
- if _, ok := iv.Type.(*types.NonNull); ok && iv.Default == nil {
- return false, fmt.Sprintf("In field %q: Expected %q, found null.", iv.Name.Name, iv.Type)
- }
- }
- }
- return true, ""
- }
-
- return false, fmt.Sprintf("Expected type %q, found %s.", t, v)
-}
-
-func validateBasicLit(v *types.PrimitiveValue, t types.Type) bool {
- switch t := t.(type) {
- case *types.ScalarTypeDefinition:
- switch t.Name {
- case "Int":
- if v.Type != scanner.Int {
- return false
- }
- f, err := strconv.ParseFloat(v.Text, 64)
- if err != nil {
- panic(err)
- }
- return f >= math.MinInt32 && f <= math.MaxInt32
- case "Float":
- return v.Type == scanner.Int || v.Type == scanner.Float
- case "String":
- return v.Type == scanner.String
- case "Boolean":
- return v.Type == scanner.Ident && (v.Text == "true" || v.Text == "false")
- case "ID":
- return v.Type == scanner.Int || v.Type == scanner.String
- default:
- //TODO: Type-check against expected type by Unmarshalling
- return true
- }
-
- case *types.EnumTypeDefinition:
- if v.Type != scanner.Ident {
- return false
- }
- for _, option := range t.EnumValuesDefinition {
- if option.EnumValue == v.Text {
- return true
- }
- }
- return false
- }
-
- return false
-}
-
-func canBeFragment(t types.Type) bool {
- switch t.(type) {
- case *types.ObjectTypeDefinition, *types.InterfaceTypeDefinition, *types.Union:
- return true
- default:
- return false
- }
-}
-
-func canBeInput(t types.Type) bool {
- switch t := t.(type) {
- case *types.InputObject, *types.ScalarTypeDefinition, *types.EnumTypeDefinition:
- return true
- case *types.List:
- return canBeInput(t.OfType)
- case *types.NonNull:
- return canBeInput(t.OfType)
- default:
- return false
- }
-}
-
-func hasSubfields(t types.Type) bool {
- switch t := t.(type) {
- case *types.ObjectTypeDefinition, *types.InterfaceTypeDefinition, *types.Union:
- return true
- case *types.List:
- return hasSubfields(t.OfType)
- case *types.NonNull:
- return hasSubfields(t.OfType)
- default:
- return false
- }
-}
-
-func isLeaf(t types.Type) bool {
- switch t.(type) {
- case *types.ScalarTypeDefinition, *types.EnumTypeDefinition:
- return true
- default:
- return false
- }
-}
-
-func isNull(lit interface{}) bool {
- _, ok := lit.(*types.NullValue)
- return ok
-}
-
-func typesCompatible(a, b types.Type) bool {
- al, aIsList := a.(*types.List)
- bl, bIsList := b.(*types.List)
- if aIsList || bIsList {
- return aIsList && bIsList && typesCompatible(al.OfType, bl.OfType)
- }
-
- ann, aIsNN := a.(*types.NonNull)
- bnn, bIsNN := b.(*types.NonNull)
- if aIsNN || bIsNN {
- return aIsNN && bIsNN && typesCompatible(ann.OfType, bnn.OfType)
- }
-
- if isLeaf(a) || isLeaf(b) {
- return a == b
- }
-
- return true
-}
-
-func typeCanBeUsedAs(t, as types.Type) bool {
- nnT, okT := t.(*types.NonNull)
- if okT {
- t = nnT.OfType
- }
-
- nnAs, okAs := as.(*types.NonNull)
- if okAs {
- as = nnAs.OfType
- if !okT {
- return false // nullable can not be used as non-null
- }
- }
-
- if t == as {
- return true
- }
-
- if lT, ok := t.(*types.List); ok {
- if lAs, ok := as.(*types.List); ok {
- return typeCanBeUsedAs(lT.OfType, lAs.OfType)
- }
- }
- return false
-}
diff --git a/vendor/github.com/graph-gophers/graphql-go/introspection.go b/vendor/github.com/graph-gophers/graphql-go/introspection.go
deleted file mode 100644
index 6877bcaf..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/introspection.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package graphql
-
-import (
- "context"
- "encoding/json"
-
- "github.com/graph-gophers/graphql-go/internal/exec/resolvable"
- "github.com/graph-gophers/graphql-go/introspection"
-)
-
-// Inspect allows inspection of the given schema.
-func (s *Schema) Inspect() *introspection.Schema {
- return introspection.WrapSchema(s.schema)
-}
-
-// ToJSON encodes the schema in a JSON format used by tools like Relay.
-func (s *Schema) ToJSON() ([]byte, error) {
- result := s.exec(context.Background(), introspectionQuery, "", nil, &resolvable.Schema{
- Meta: s.res.Meta,
- Query: &resolvable.Object{},
- Schema: *s.schema,
- })
- if len(result.Errors) != 0 {
- panic(result.Errors[0])
- }
- return json.MarshalIndent(result.Data, "", "\t")
-}
-
-var introspectionQuery = `
- query {
- __schema {
- queryType { name }
- mutationType { name }
- subscriptionType { name }
- types {
- ...FullType
- }
- directives {
- name
- description
- locations
- args {
- ...InputValue
- }
- }
- }
- }
- fragment FullType on __Type {
- kind
- name
- description
- fields(includeDeprecated: true) {
- name
- description
- args {
- ...InputValue
- }
- type {
- ...TypeRef
- }
- isDeprecated
- deprecationReason
- }
- inputFields {
- ...InputValue
- }
- interfaces {
- ...TypeRef
- }
- enumValues(includeDeprecated: true) {
- name
- description
- isDeprecated
- deprecationReason
- }
- possibleTypes {
- ...TypeRef
- }
- }
- fragment InputValue on __InputValue {
- name
- description
- type { ...TypeRef }
- defaultValue
- }
- fragment TypeRef on __Type {
- kind
- name
- ofType {
- kind
- name
- ofType {
- kind
- name
- ofType {
- kind
- name
- ofType {
- kind
- name
- ofType {
- kind
- name
- ofType {
- kind
- name
- ofType {
- kind
- name
- }
- }
- }
- }
- }
- }
- }
- }
-`
diff --git a/vendor/github.com/graph-gophers/graphql-go/introspection/introspection.go b/vendor/github.com/graph-gophers/graphql-go/introspection/introspection.go
deleted file mode 100644
index a0a2fa9b..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/introspection/introspection.go
+++ /dev/null
@@ -1,312 +0,0 @@
-package introspection
-
-import (
- "sort"
-
- "github.com/graph-gophers/graphql-go/types"
-)
-
-type Schema struct {
- schema *types.Schema
-}
-
-// WrapSchema is only used internally.
-func WrapSchema(schema *types.Schema) *Schema {
- return &Schema{schema}
-}
-
-func (r *Schema) Types() []*Type {
- var names []string
- for name := range r.schema.Types {
- names = append(names, name)
- }
- sort.Strings(names)
-
- l := make([]*Type, len(names))
- for i, name := range names {
- l[i] = &Type{r.schema.Types[name]}
- }
- return l
-}
-
-func (r *Schema) Directives() []*Directive {
- var names []string
- for name := range r.schema.Directives {
- names = append(names, name)
- }
- sort.Strings(names)
-
- l := make([]*Directive, len(names))
- for i, name := range names {
- l[i] = &Directive{r.schema.Directives[name]}
- }
- return l
-}
-
-func (r *Schema) QueryType() *Type {
- t, ok := r.schema.EntryPoints["query"]
- if !ok {
- return nil
- }
- return &Type{t}
-}
-
-func (r *Schema) MutationType() *Type {
- t, ok := r.schema.EntryPoints["mutation"]
- if !ok {
- return nil
- }
- return &Type{t}
-}
-
-func (r *Schema) SubscriptionType() *Type {
- t, ok := r.schema.EntryPoints["subscription"]
- if !ok {
- return nil
- }
- return &Type{t}
-}
-
-type Type struct {
- typ types.Type
-}
-
-// WrapType is only used internally.
-func WrapType(typ types.Type) *Type {
- return &Type{typ}
-}
-
-func (r *Type) Kind() string {
- return r.typ.Kind()
-}
-
-func (r *Type) Name() *string {
- if named, ok := r.typ.(types.NamedType); ok {
- name := named.TypeName()
- return &name
- }
- return nil
-}
-
-func (r *Type) Description() *string {
- if named, ok := r.typ.(types.NamedType); ok {
- desc := named.Description()
- if desc == "" {
- return nil
- }
- return &desc
- }
- return nil
-}
-
-func (r *Type) Fields(args *struct{ IncludeDeprecated bool }) *[]*Field {
- var fields types.FieldsDefinition
- switch t := r.typ.(type) {
- case *types.ObjectTypeDefinition:
- fields = t.Fields
- case *types.InterfaceTypeDefinition:
- fields = t.Fields
- default:
- return nil
- }
-
- var l []*Field
- for _, f := range fields {
- if d := f.Directives.Get("deprecated"); d == nil || args.IncludeDeprecated {
- l = append(l, &Field{field: f})
- }
- }
- return &l
-}
-
-func (r *Type) Interfaces() *[]*Type {
- t, ok := r.typ.(*types.ObjectTypeDefinition)
- if !ok {
- return nil
- }
-
- l := make([]*Type, len(t.Interfaces))
- for i, intf := range t.Interfaces {
- l[i] = &Type{intf}
- }
- return &l
-}
-
-func (r *Type) PossibleTypes() *[]*Type {
- var possibleTypes []*types.ObjectTypeDefinition
- switch t := r.typ.(type) {
- case *types.InterfaceTypeDefinition:
- possibleTypes = t.PossibleTypes
- case *types.Union:
- possibleTypes = t.UnionMemberTypes
- default:
- return nil
- }
-
- l := make([]*Type, len(possibleTypes))
- for i, intf := range possibleTypes {
- l[i] = &Type{intf}
- }
- return &l
-}
-
-func (r *Type) EnumValues(args *struct{ IncludeDeprecated bool }) *[]*EnumValue {
- t, ok := r.typ.(*types.EnumTypeDefinition)
- if !ok {
- return nil
- }
-
- var l []*EnumValue
- for _, v := range t.EnumValuesDefinition {
- if d := v.Directives.Get("deprecated"); d == nil || args.IncludeDeprecated {
- l = append(l, &EnumValue{v})
- }
- }
- return &l
-}
-
-func (r *Type) InputFields() *[]*InputValue {
- t, ok := r.typ.(*types.InputObject)
- if !ok {
- return nil
- }
-
- l := make([]*InputValue, len(t.Values))
- for i, v := range t.Values {
- l[i] = &InputValue{v}
- }
- return &l
-}
-
-func (r *Type) OfType() *Type {
- switch t := r.typ.(type) {
- case *types.List:
- return &Type{t.OfType}
- case *types.NonNull:
- return &Type{t.OfType}
- default:
- return nil
- }
-}
-
-type Field struct {
- field *types.FieldDefinition
-}
-
-func (r *Field) Name() string {
- return r.field.Name
-}
-
-func (r *Field) Description() *string {
- if r.field.Desc == "" {
- return nil
- }
- return &r.field.Desc
-}
-
-func (r *Field) Args() []*InputValue {
- l := make([]*InputValue, len(r.field.Arguments))
- for i, v := range r.field.Arguments {
- l[i] = &InputValue{v}
- }
- return l
-}
-
-func (r *Field) Type() *Type {
- return &Type{r.field.Type}
-}
-
-func (r *Field) IsDeprecated() bool {
- return r.field.Directives.Get("deprecated") != nil
-}
-
-func (r *Field) DeprecationReason() *string {
- d := r.field.Directives.Get("deprecated")
- if d == nil {
- return nil
- }
- reason := d.Arguments.MustGet("reason").Deserialize(nil).(string)
- return &reason
-}
-
-type InputValue struct {
- value *types.InputValueDefinition
-}
-
-func (r *InputValue) Name() string {
- return r.value.Name.Name
-}
-
-func (r *InputValue) Description() *string {
- if r.value.Desc == "" {
- return nil
- }
- return &r.value.Desc
-}
-
-func (r *InputValue) Type() *Type {
- return &Type{r.value.Type}
-}
-
-func (r *InputValue) DefaultValue() *string {
- if r.value.Default == nil {
- return nil
- }
- s := r.value.Default.String()
- return &s
-}
-
-type EnumValue struct {
- value *types.EnumValueDefinition
-}
-
-func (r *EnumValue) Name() string {
- return r.value.EnumValue
-}
-
-func (r *EnumValue) Description() *string {
- if r.value.Desc == "" {
- return nil
- }
- return &r.value.Desc
-}
-
-func (r *EnumValue) IsDeprecated() bool {
- return r.value.Directives.Get("deprecated") != nil
-}
-
-func (r *EnumValue) DeprecationReason() *string {
- d := r.value.Directives.Get("deprecated")
- if d == nil {
- return nil
- }
- reason := d.Arguments.MustGet("reason").Deserialize(nil).(string)
- return &reason
-}
-
-type Directive struct {
- directive *types.DirectiveDefinition
-}
-
-func (r *Directive) Name() string {
- return r.directive.Name
-}
-
-func (r *Directive) Description() *string {
- if r.directive.Desc == "" {
- return nil
- }
- return &r.directive.Desc
-}
-
-func (r *Directive) Locations() []string {
- return r.directive.Locations
-}
-
-func (r *Directive) Args() []*InputValue {
- l := make([]*InputValue, len(r.directive.Arguments))
- for i, v := range r.directive.Arguments {
- l[i] = &InputValue{v}
- }
- return l
-}
diff --git a/vendor/github.com/graph-gophers/graphql-go/log/log.go b/vendor/github.com/graph-gophers/graphql-go/log/log.go
deleted file mode 100644
index bdada874..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/log/log.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package log
-
-import (
- "context"
- "log"
- "runtime"
-)
-
-// Logger is the interface used to log panics that occur during query execution. It is settable via graphql.ParseSchema
-type Logger interface {
- LogPanic(ctx context.Context, value interface{})
-}
-
-// DefaultLogger is the default logger used to log panics that occur during query execution
-type DefaultLogger struct{}
-
-// LogPanic is used to log recovered panic values that occur during query execution
-func (l *DefaultLogger) LogPanic(ctx context.Context, value interface{}) {
- const size = 64 << 10
- buf := make([]byte, size)
- buf = buf[:runtime.Stack(buf, false)]
- log.Printf("graphql: panic occurred: %v\n%s\ncontext: %v", value, buf, ctx)
-}
diff --git a/vendor/github.com/graph-gophers/graphql-go/nullable_types.go b/vendor/github.com/graph-gophers/graphql-go/nullable_types.go
deleted file mode 100644
index fa5bbfd6..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/nullable_types.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package graphql
-
-import (
- "fmt"
- "math"
-)
-
-// NullString is a string that can be null. Use it in input structs to
-// differentiate a value explicitly set to null from an omitted value.
-// When the value is defined (either null or a value) Set is true.
-type NullString struct {
- Value *string
- Set bool
-}
-
-func (NullString) ImplementsGraphQLType(name string) bool {
- return name == "String"
-}
-
-func (s *NullString) UnmarshalGraphQL(input interface{}) error {
- s.Set = true
-
- if input == nil {
- return nil
- }
-
- switch v := input.(type) {
- case string:
- s.Value = &v
- return nil
- default:
- return fmt.Errorf("wrong type for String: %T", v)
- }
-}
-
-func (s *NullString) Nullable() {}
-
-// NullBool is a boolean that can be null. Use it in input structs to
-// differentiate a value explicitly set to null from an omitted value.
-// When the value is defined (either null or a value) Set is true.
-type NullBool struct {
- Value *bool
- Set bool
-}
-
-func (NullBool) ImplementsGraphQLType(name string) bool {
- return name == "Boolean"
-}
-
-func (s *NullBool) UnmarshalGraphQL(input interface{}) error {
- s.Set = true
-
- if input == nil {
- return nil
- }
-
- switch v := input.(type) {
- case bool:
- s.Value = &v
- return nil
- default:
- return fmt.Errorf("wrong type for Boolean: %T", v)
- }
-}
-
-func (s *NullBool) Nullable() {}
-
-// NullInt is an integer that can be null. Use it in input structs to
-// differentiate a value explicitly set to null from an omitted value.
-// When the value is defined (either null or a value) Set is true.
-type NullInt struct {
- Value *int32
- Set bool
-}
-
-func (NullInt) ImplementsGraphQLType(name string) bool {
- return name == "Int"
-}
-
-func (s *NullInt) UnmarshalGraphQL(input interface{}) error {
- s.Set = true
-
- if input == nil {
- return nil
- }
-
- switch v := input.(type) {
- case int32:
- s.Value = &v
- return nil
- case float64:
- coerced := int32(v)
- if v < math.MinInt32 || v > math.MaxInt32 || float64(coerced) != v {
- return fmt.Errorf("not a 32-bit integer")
- }
- s.Value = &coerced
- return nil
- default:
- return fmt.Errorf("wrong type for Int: %T", v)
- }
-}
-
-func (s *NullInt) Nullable() {}
-
-// NullFloat is a float that can be null. Use it in input structs to
-// differentiate a value explicitly set to null from an omitted value.
-// When the value is defined (either null or a value) Set is true.
-type NullFloat struct {
- Value *float64
- Set bool
-}
-
-func (NullFloat) ImplementsGraphQLType(name string) bool {
- return name == "Float"
-}
-
-func (s *NullFloat) UnmarshalGraphQL(input interface{}) error {
- s.Set = true
-
- if input == nil {
- return nil
- }
-
- switch v := input.(type) {
- case float64:
- s.Value = &v
- return nil
- case int32:
- coerced := float64(v)
- s.Value = &coerced
- return nil
- case int:
- coerced := float64(v)
- s.Value = &coerced
- return nil
- default:
- return fmt.Errorf("wrong type for Float: %T", v)
- }
-}
-
-func (s *NullFloat) Nullable() {}
-
-// NullTime is a time value that can be null. Use it in input structs to
-// differentiate a value explicitly set to null from an omitted value.
-// When the value is defined (either null or a value) Set is true.
-type NullTime struct {
- Value *Time
- Set bool
-}
-
-func (NullTime) ImplementsGraphQLType(name string) bool {
- return name == "Time"
-}
-
-func (s *NullTime) UnmarshalGraphQL(input interface{}) error {
- s.Set = true
-
- if input == nil {
- return nil
- }
-
- s.Value = new(Time)
- return s.Value.UnmarshalGraphQL(input)
-}
-
-func (s *NullTime) Nullable() {}
diff --git a/vendor/github.com/graph-gophers/graphql-go/subscriptions.go b/vendor/github.com/graph-gophers/graphql-go/subscriptions.go
deleted file mode 100644
index 34064dc7..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/subscriptions.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package graphql
-
-import (
- "context"
- "errors"
-
- qerrors "github.com/graph-gophers/graphql-go/errors"
- "github.com/graph-gophers/graphql-go/internal/common"
- "github.com/graph-gophers/graphql-go/internal/exec"
- "github.com/graph-gophers/graphql-go/internal/exec/resolvable"
- "github.com/graph-gophers/graphql-go/internal/exec/selected"
- "github.com/graph-gophers/graphql-go/internal/query"
- "github.com/graph-gophers/graphql-go/internal/validation"
- "github.com/graph-gophers/graphql-go/introspection"
-)
-
-// Subscribe returns a response channel for the given subscription with the schema's
-// resolver. It returns an error if the schema was created without a resolver.
-// If the context gets cancelled, the response channel will be closed and no
-// further resolvers will be called. The context error will be returned as soon
-// as possible (not immediately).
-func (s *Schema) Subscribe(ctx context.Context, queryString string, operationName string, variables map[string]interface{}) (<-chan interface{}, error) {
- if !s.res.Resolver.IsValid() {
- return nil, errors.New("schema created without resolver, can not subscribe")
- }
- if _, ok := s.schema.EntryPoints["subscription"]; !ok {
- return nil, errors.New("no subscriptions are offered by the schema")
- }
- return s.subscribe(ctx, queryString, operationName, variables, s.res), nil
-}
-
-func (s *Schema) subscribe(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, res *resolvable.Schema) <-chan interface{} {
- doc, qErr := query.Parse(queryString)
- if qErr != nil {
- return sendAndReturnClosed(&Response{Errors: []*qerrors.QueryError{qErr}})
- }
-
- validationFinish := s.validationTracer.TraceValidation(ctx)
- errs := validation.Validate(s.schema, doc, variables, s.maxDepth)
- validationFinish(errs)
- if len(errs) != 0 {
- return sendAndReturnClosed(&Response{Errors: errs})
- }
-
- op, err := getOperation(doc, operationName)
- if err != nil {
- return sendAndReturnClosed(&Response{Errors: []*qerrors.QueryError{qerrors.Errorf("%s", err)}})
- }
-
- r := &exec.Request{
- Request: selected.Request{
- Doc: doc,
- Vars: variables,
- Schema: s.schema,
- },
- Limiter: make(chan struct{}, s.maxParallelism),
- Tracer: s.tracer,
- Logger: s.logger,
- PanicHandler: s.panicHandler,
- SubscribeResolverTimeout: s.subscribeResolverTimeout,
- }
- varTypes := make(map[string]*introspection.Type)
- for _, v := range op.Vars {
- t, err := common.ResolveType(v.Type, s.schema.Resolve)
- if err != nil {
- return sendAndReturnClosed(&Response{Errors: []*qerrors.QueryError{err}})
- }
- varTypes[v.Name.Name] = introspection.WrapType(t)
- }
-
- if op.Type == query.Query || op.Type == query.Mutation {
- data, errs := r.Execute(ctx, res, op)
- return sendAndReturnClosed(&Response{Data: data, Errors: errs})
- }
-
- responses := r.Subscribe(ctx, res, op)
- c := make(chan interface{})
- go func() {
- for resp := range responses {
- c <- &Response{
- Data: resp.Data,
- Errors: resp.Errors,
- }
- }
- close(c)
- }()
-
- return c
-}
-
-func sendAndReturnClosed(resp *Response) chan interface{} {
- c := make(chan interface{}, 1)
- c <- resp
- close(c)
- return c
-}
diff --git a/vendor/github.com/graph-gophers/graphql-go/time.go b/vendor/github.com/graph-gophers/graphql-go/time.go
deleted file mode 100644
index 974287e7..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/time.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package graphql
-
-import (
- "encoding/json"
- "fmt"
- "time"
-)
-
-// Time is a custom GraphQL type to represent an instant in time. It has to be added to a schema
-// via "scalar Time" since it is not a predeclared GraphQL type like "ID".
-type Time struct {
- time.Time
-}
-
-// ImplementsGraphQLType maps this custom Go type
-// to the graphql scalar type in the schema.
-func (Time) ImplementsGraphQLType(name string) bool {
- return name == "Time"
-}
-
-// UnmarshalGraphQL is a custom unmarshaler for Time
-//
-// This function will be called whenever you use the
-// time scalar as an input
-func (t *Time) UnmarshalGraphQL(input interface{}) error {
- switch input := input.(type) {
- case time.Time:
- t.Time = input
- return nil
- case string:
- var err error
- t.Time, err = time.Parse(time.RFC3339, input)
- return err
- case []byte:
- var err error
- t.Time, err = time.Parse(time.RFC3339, string(input))
- return err
- case int32:
- t.Time = time.Unix(int64(input), 0)
- return nil
- case int64:
- if input >= 1e10 {
- sec := input / 1e9
- nsec := input - (sec * 1e9)
- t.Time = time.Unix(sec, nsec)
- } else {
- t.Time = time.Unix(input, 0)
- }
- return nil
- case float64:
- t.Time = time.Unix(int64(input), 0)
- return nil
- default:
- return fmt.Errorf("wrong type for Time: %T", input)
- }
-}
-
-// MarshalJSON is a custom marshaler for Time
-//
-// This function will be called whenever you
-// query for fields that use the Time type
-func (t Time) MarshalJSON() ([]byte, error) {
- return json.Marshal(t.Time)
-}
diff --git a/vendor/github.com/graph-gophers/graphql-go/trace/trace.go b/vendor/github.com/graph-gophers/graphql-go/trace/trace.go
deleted file mode 100644
index 8d5d8a71..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/trace/trace.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package trace
-
-import (
- "context"
- "fmt"
-
- "github.com/graph-gophers/graphql-go/errors"
- "github.com/graph-gophers/graphql-go/introspection"
- opentracing "github.com/opentracing/opentracing-go"
- "github.com/opentracing/opentracing-go/ext"
- "github.com/opentracing/opentracing-go/log"
-)
-
-type TraceQueryFinishFunc func([]*errors.QueryError)
-type TraceFieldFinishFunc func(*errors.QueryError)
-
-type Tracer interface {
- TraceQuery(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, varTypes map[string]*introspection.Type) (context.Context, TraceQueryFinishFunc)
- TraceField(ctx context.Context, label, typeName, fieldName string, trivial bool, args map[string]interface{}) (context.Context, TraceFieldFinishFunc)
-}
-
-type OpenTracingTracer struct{}
-
-func (OpenTracingTracer) TraceQuery(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, varTypes map[string]*introspection.Type) (context.Context, TraceQueryFinishFunc) {
- span, spanCtx := opentracing.StartSpanFromContext(ctx, "GraphQL request")
- span.SetTag("graphql.query", queryString)
-
- if operationName != "" {
- span.SetTag("graphql.operationName", operationName)
- }
-
- if len(variables) != 0 {
- span.LogFields(log.Object("graphql.variables", variables))
- }
-
- return spanCtx, func(errs []*errors.QueryError) {
- if len(errs) > 0 {
- msg := errs[0].Error()
- if len(errs) > 1 {
- msg += fmt.Sprintf(" (and %d more errors)", len(errs)-1)
- }
- ext.Error.Set(span, true)
- span.SetTag("graphql.error", msg)
- }
- span.Finish()
- }
-}
-
-func (OpenTracingTracer) TraceField(ctx context.Context, label, typeName, fieldName string, trivial bool, args map[string]interface{}) (context.Context, TraceFieldFinishFunc) {
- if trivial {
- return ctx, noop
- }
-
- span, spanCtx := opentracing.StartSpanFromContext(ctx, label)
- span.SetTag("graphql.type", typeName)
- span.SetTag("graphql.field", fieldName)
- for name, value := range args {
- span.SetTag("graphql.args."+name, value)
- }
-
- return spanCtx, func(err *errors.QueryError) {
- if err != nil {
- ext.Error.Set(span, true)
- span.SetTag("graphql.error", err.Error())
- }
- span.Finish()
- }
-}
-
-func (OpenTracingTracer) TraceValidation(ctx context.Context) TraceValidationFinishFunc {
- span, _ := opentracing.StartSpanFromContext(ctx, "Validate Query")
-
- return func(errs []*errors.QueryError) {
- if len(errs) > 0 {
- msg := errs[0].Error()
- if len(errs) > 1 {
- msg += fmt.Sprintf(" (and %d more errors)", len(errs)-1)
- }
- ext.Error.Set(span, true)
- span.SetTag("graphql.error", msg)
- }
- span.Finish()
- }
-}
-
-func noop(*errors.QueryError) {}
-
-type NoopTracer struct{}
-
-func (NoopTracer) TraceQuery(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, varTypes map[string]*introspection.Type) (context.Context, TraceQueryFinishFunc) {
- return ctx, func(errs []*errors.QueryError) {}
-}
-
-func (NoopTracer) TraceField(ctx context.Context, label, typeName, fieldName string, trivial bool, args map[string]interface{}) (context.Context, TraceFieldFinishFunc) {
- return ctx, func(err *errors.QueryError) {}
-}
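
The removed `Tracer` interface hands the caller a finish callback for each query or field, so instrumentation reduces to starting a span and closing over it. A dependency-free sketch of that start/finish-closure pattern is shown below; the names are illustrative and no graphql-go or OpenTracing types are involved.

```go
package main

import (
	"fmt"
	"time"
)

// finishFunc mirrors the shape of TraceQueryFinishFunc: the caller invokes it
// once the traced work is done, passing any errors that occurred.
type finishFunc func(errs []error)

// traceQuery starts timing and returns a closure that reports the result.
func traceQuery(query string) finishFunc {
	start := time.Now()
	return func(errs []error) {
		fmt.Printf("query %q finished in %s with %d error(s)\n",
			query, time.Since(start), len(errs))
	}
}

func main() {
	finish := traceQuery("{ hero { name } }")
	// ... execute the query ...
	finish(nil)
}
```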
diff --git a/vendor/github.com/graph-gophers/graphql-go/trace/validation_trace.go b/vendor/github.com/graph-gophers/graphql-go/trace/validation_trace.go
deleted file mode 100644
index bce7a9a4..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/trace/validation_trace.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package trace
-
-import (
- "context"
-
- "github.com/graph-gophers/graphql-go/errors"
-)
-
-type TraceValidationFinishFunc = TraceQueryFinishFunc
-
-// Deprecated: use ValidationTracerContext.
-type ValidationTracer interface {
- TraceValidation() TraceValidationFinishFunc
-}
-
-type ValidationTracerContext interface {
- TraceValidation(ctx context.Context) TraceValidationFinishFunc
-}
-
-type NoopValidationTracer struct{}
-
-// Deprecated: use a Tracer which implements ValidationTracerContext.
-func (NoopValidationTracer) TraceValidation() TraceValidationFinishFunc {
- return func(errs []*errors.QueryError) {}
-}
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/argument.go b/vendor/github.com/graph-gophers/graphql-go/types/argument.go
deleted file mode 100644
index b2681a28..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/types/argument.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package types
-
-// Argument is a representation of the GraphQL Argument.
-//
-// https://spec.graphql.org/draft/#sec-Language.Arguments
-type Argument struct {
- Name Ident
- Value Value
-}
-
-// ArgumentList is a collection of GraphQL Arguments.
-type ArgumentList []*Argument
-
-// Returns a Value in the ArgumentList by name.
-func (l ArgumentList) Get(name string) (Value, bool) {
- for _, arg := range l {
- if arg.Name.Name == name {
- return arg.Value, true
- }
- }
- return nil, false
-}
-
-// MustGet returns a Value in the ArgumentList by name.
-// MustGet will panic if the argument name is not found in the ArgumentList.
-func (l ArgumentList) MustGet(name string) Value {
- value, ok := l.Get(name)
- if !ok {
- panic("argument not found")
- }
- return value
-}
-
-type ArgumentsDefinition []*InputValueDefinition
-
-// Get returns an InputValueDefinition in the ArgumentsDefinition by name or nil if not found.
-func (a ArgumentsDefinition) Get(name string) *InputValueDefinition {
- for _, inputValue := range a {
- if inputValue.Name.Name == name {
- return inputValue
- }
- }
- return nil
-}
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/directive.go b/vendor/github.com/graph-gophers/graphql-go/types/directive.go
deleted file mode 100644
index 0f8a4b99..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/types/directive.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package types
-
-import "github.com/graph-gophers/graphql-go/errors"
-
-// Directive is a representation of the GraphQL Directive.
-//
-// http://spec.graphql.org/draft/#sec-Language.Directives
-type Directive struct {
- Name Ident
- Arguments ArgumentList
-}
-
-// DirectiveDefinition is a representation of the GraphQL DirectiveDefinition.
-//
-// http://spec.graphql.org/draft/#sec-Type-System.Directives
-type DirectiveDefinition struct {
- Name string
- Desc string
- Locations []string
- Arguments ArgumentsDefinition
- Loc errors.Location
-}
-
-type DirectiveList []*Directive
-
-// Returns the Directive in the DirectiveList by name or nil if not found.
-func (l DirectiveList) Get(name string) *Directive {
- for _, d := range l {
- if d.Name.Name == name {
- return d
- }
- }
- return nil
-}
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/doc.go b/vendor/github.com/graph-gophers/graphql-go/types/doc.go
deleted file mode 100644
index 87caa60b..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/types/doc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- Package types represents all types from the GraphQL specification in code.
-
-
- The names of the Go types, whenever possible, match 1:1 with the names from
- the specification.
-
-*/
-package types
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/enum.go b/vendor/github.com/graph-gophers/graphql-go/types/enum.go
deleted file mode 100644
index b2c84caa..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/types/enum.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package types
-
-import "github.com/graph-gophers/graphql-go/errors"
-
-// EnumTypeDefinition defines a set of possible enum values.
-//
-// Like scalar types, an EnumTypeDefinition also represents a leaf value in a GraphQL type system.
-//
-// http://spec.graphql.org/draft/#sec-Enums
-type EnumTypeDefinition struct {
- Name string
- EnumValuesDefinition []*EnumValueDefinition
- Desc string
- Directives DirectiveList
- Loc errors.Location
-}
-
-// EnumValueDefinition are unique values that may be serialized as a string: the name of the
-// represented value.
-//
-// http://spec.graphql.org/draft/#EnumValueDefinition
-type EnumValueDefinition struct {
- EnumValue string
- Directives DirectiveList
- Desc string
- Loc errors.Location
-}
-
-func (*EnumTypeDefinition) Kind() string { return "ENUM" }
-func (t *EnumTypeDefinition) String() string { return t.Name }
-func (t *EnumTypeDefinition) TypeName() string { return t.Name }
-func (t *EnumTypeDefinition) Description() string { return t.Desc }
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/extension.go b/vendor/github.com/graph-gophers/graphql-go/types/extension.go
deleted file mode 100644
index b82ea670..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/types/extension.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package types
-
-import "github.com/graph-gophers/graphql-go/errors"
-
-// Extension type defines a GraphQL type extension.
-// Schemas, Objects, Inputs and Scalars can be extended.
-//
-// https://spec.graphql.org/draft/#sec-Type-System-Extensions
-type Extension struct {
- Type NamedType
- Directives DirectiveList
- Loc errors.Location
-}
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/field.go b/vendor/github.com/graph-gophers/graphql-go/types/field.go
deleted file mode 100644
index ea5bca5c..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/types/field.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package types
-
-import "github.com/graph-gophers/graphql-go/errors"
-
-// FieldDefinition is a representation of a GraphQL FieldDefinition.
-//
-// http://spec.graphql.org/draft/#FieldDefinition
-type FieldDefinition struct {
- Name string
- Arguments ArgumentsDefinition
- Type Type
- Directives DirectiveList
- Desc string
- Loc errors.Location
-}
-
-// FieldsDefinition is a list of an ObjectTypeDefinition's Fields.
-//
-// https://spec.graphql.org/draft/#FieldsDefinition
-type FieldsDefinition []*FieldDefinition
-
-// Get returns a FieldDefinition in a FieldsDefinition by name or nil if not found.
-func (l FieldsDefinition) Get(name string) *FieldDefinition {
- for _, f := range l {
- if f.Name == name {
- return f
- }
- }
- return nil
-}
-
-// Names returns a slice of FieldDefinition names.
-func (l FieldsDefinition) Names() []string {
- names := make([]string, len(l))
- for i, f := range l {
- names[i] = f.Name
- }
- return names
-}
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/fragment.go b/vendor/github.com/graph-gophers/graphql-go/types/fragment.go
deleted file mode 100644
index 606219ca..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/types/fragment.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package types
-
-import "github.com/graph-gophers/graphql-go/errors"
-
-type Fragment struct {
- On TypeName
- Selections SelectionSet
-}
-
-// InlineFragment is a representation of the GraphQL InlineFragment.
-//
-// http://spec.graphql.org/draft/#InlineFragment
-type InlineFragment struct {
- Fragment
- Directives DirectiveList
- Loc errors.Location
-}
-
-// FragmentDefinition is a representation of the GraphQL FragmentDefinition.
-//
-// http://spec.graphql.org/draft/#FragmentDefinition
-type FragmentDefinition struct {
- Fragment
- Name Ident
- Directives DirectiveList
- Loc errors.Location
-}
-
-// FragmentSpread is a representation of the GraphQL FragmentSpread.
-//
-// http://spec.graphql.org/draft/#FragmentSpread
-type FragmentSpread struct {
- Name Ident
- Directives DirectiveList
- Loc errors.Location
-}
-
-type FragmentList []*FragmentDefinition
-
-// Returns a FragmentDefinition by name or nil if not found.
-func (l FragmentList) Get(name string) *FragmentDefinition {
- for _, f := range l {
- if f.Name.Name == name {
- return f
- }
- }
- return nil
-}
-
-func (InlineFragment) isSelection() {}
-func (FragmentSpread) isSelection() {}
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/input.go b/vendor/github.com/graph-gophers/graphql-go/types/input.go
deleted file mode 100644
index c179bc3e..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/types/input.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package types
-
-import "github.com/graph-gophers/graphql-go/errors"
-
-// InputValueDefinition is a representation of the GraphQL InputValueDefinition.
-//
-// http://spec.graphql.org/draft/#InputValueDefinition
-type InputValueDefinition struct {
- Name Ident
- Type Type
- Default Value
- Desc string
- Directives DirectiveList
- Loc errors.Location
- TypeLoc errors.Location
-}
-
-type InputValueDefinitionList []*InputValueDefinition
-
-// Returns an InputValueDefinition by name or nil if not found.
-func (l InputValueDefinitionList) Get(name string) *InputValueDefinition {
- for _, v := range l {
- if v.Name.Name == name {
- return v
- }
- }
- return nil
-}
-
-// InputObject types define a set of input fields; the input fields are either scalars, enums, or
-// other input objects.
-//
-// This allows arguments to accept arbitrarily complex structs.
-//
-// http://spec.graphql.org/draft/#sec-Input-Objects
-type InputObject struct {
- Name string
- Desc string
- Values ArgumentsDefinition
- Directives DirectiveList
- Loc errors.Location
-}
-
-func (*InputObject) Kind() string { return "INPUT_OBJECT" }
-func (t *InputObject) String() string { return t.Name }
-func (t *InputObject) TypeName() string { return t.Name }
-func (t *InputObject) Description() string { return t.Desc }
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/interface.go b/vendor/github.com/graph-gophers/graphql-go/types/interface.go
deleted file mode 100644
index e741e591..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/types/interface.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package types
-
-import "github.com/graph-gophers/graphql-go/errors"
-
-// InterfaceTypeDefinition recursively defines list of named fields with their arguments via the
-// implementation chain of interfaces.
-//
-// GraphQL objects can then implement these interfaces which requires that the object type will
-// define all fields defined by those interfaces.
-//
-// http://spec.graphql.org/draft/#sec-Interfaces
-type InterfaceTypeDefinition struct {
- Name string
- PossibleTypes []*ObjectTypeDefinition
- Fields FieldsDefinition
- Desc string
- Directives DirectiveList
- Loc errors.Location
- Interfaces []*InterfaceTypeDefinition
-}
-
-func (*InterfaceTypeDefinition) Kind() string { return "INTERFACE" }
-func (t *InterfaceTypeDefinition) String() string { return t.Name }
-func (t *InterfaceTypeDefinition) TypeName() string { return t.Name }
-func (t *InterfaceTypeDefinition) Description() string { return t.Desc }
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/object.go b/vendor/github.com/graph-gophers/graphql-go/types/object.go
deleted file mode 100644
index e65c79db..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/types/object.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package types
-
-import "github.com/graph-gophers/graphql-go/errors"
-
-// ObjectTypeDefinition represents a GraphQL ObjectTypeDefinition.
-//
-// type FooObject {
-// foo: String
-// }
-//
-// https://spec.graphql.org/draft/#sec-Objects
-type ObjectTypeDefinition struct {
- Name string
- Interfaces []*InterfaceTypeDefinition
- Fields FieldsDefinition
- Desc string
- Directives DirectiveList
- InterfaceNames []string
- Loc errors.Location
-}
-
-func (*ObjectTypeDefinition) Kind() string { return "OBJECT" }
-func (t *ObjectTypeDefinition) String() string { return t.Name }
-func (t *ObjectTypeDefinition) TypeName() string { return t.Name }
-func (t *ObjectTypeDefinition) Description() string { return t.Desc }
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/query.go b/vendor/github.com/graph-gophers/graphql-go/types/query.go
deleted file mode 100644
index caca6ef4..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/types/query.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package types
-
-import "github.com/graph-gophers/graphql-go/errors"
-
-// ExecutableDefinition represents a set of operations or fragments that can be executed
-// against a schema.
-//
-// http://spec.graphql.org/draft/#ExecutableDefinition
-type ExecutableDefinition struct {
- Operations OperationList
- Fragments FragmentList
-}
-
-// OperationDefinition represents a GraphQL Operation.
-//
-// https://spec.graphql.org/draft/#sec-Language.Operations
-type OperationDefinition struct {
- Type OperationType
- Name Ident
- Vars ArgumentsDefinition
- Selections SelectionSet
- Directives DirectiveList
- Loc errors.Location
-}
-
-type OperationType string
-
-// A Selection is a field requested in a GraphQL operation.
-//
-// http://spec.graphql.org/draft/#Selection
-type Selection interface {
- isSelection()
-}
-
-// A SelectionSet represents a collection of Selections
-//
-// http://spec.graphql.org/draft/#sec-Selection-Sets
-type SelectionSet []Selection
-
-// Field represents a field used in a query.
-type Field struct {
- Alias Ident
- Name Ident
- Arguments ArgumentList
- Directives DirectiveList
- SelectionSet SelectionSet
- SelectionSetLoc errors.Location
-}
-
-func (Field) isSelection() {}
-
-type OperationList []*OperationDefinition
-
-// Get returns an OperationDefinition by name or nil if not found.
-func (l OperationList) Get(name string) *OperationDefinition {
- for _, f := range l {
- if f.Name.Name == name {
- return f
- }
- }
- return nil
-}
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/scalar.go b/vendor/github.com/graph-gophers/graphql-go/types/scalar.go
deleted file mode 100644
index 5bd529a8..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/types/scalar.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package types
-
-import "github.com/graph-gophers/graphql-go/errors"
-
-// ScalarTypeDefinition types represent primitive leaf values (e.g. a string or an integer) in a GraphQL type
-// system.
-//
-// GraphQL responses take the form of a hierarchical tree; the leaves on these trees are GraphQL
-// scalars.
-//
-// http://spec.graphql.org/draft/#sec-Scalars
-type ScalarTypeDefinition struct {
- Name string
- Desc string
- Directives DirectiveList
- Loc errors.Location
-}
-
-func (*ScalarTypeDefinition) Kind() string { return "SCALAR" }
-func (t *ScalarTypeDefinition) String() string { return t.Name }
-func (t *ScalarTypeDefinition) TypeName() string { return t.Name }
-func (t *ScalarTypeDefinition) Description() string { return t.Desc }
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/schema.go b/vendor/github.com/graph-gophers/graphql-go/types/schema.go
deleted file mode 100644
index 06811a97..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/types/schema.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package types
-
-// Schema represents a GraphQL service's collective type system capabilities.
-// A schema is defined in terms of the types and directives it supports as well as the root
-// operation types for each kind of operation: `query`, `mutation`, and `subscription`.
-//
-// For a more formal definition, read the relevant section in the specification:
-//
-// http://spec.graphql.org/draft/#sec-Schema
-type Schema struct {
- // EntryPoints determines the place in the type system where `query`, `mutation`, and
- // `subscription` operations begin.
- //
- // http://spec.graphql.org/draft/#sec-Root-Operation-Types
- //
- EntryPoints map[string]NamedType
-
- // Types are the fundamental unit of any GraphQL schema.
- // There are six kinds of named types, and two wrapping types.
- //
- // http://spec.graphql.org/draft/#sec-Types
- Types map[string]NamedType
-
- // Directives are used to annotate various parts of a GraphQL document as an indicator that they
- // should be evaluated differently by a validator, executor, or client tool such as a code
- // generator.
- //
- // http://spec.graphql.org/#sec-Type-System.Directives
- Directives map[string]*DirectiveDefinition
-
- UseFieldResolvers bool
-
- EntryPointNames map[string]string
- Objects []*ObjectTypeDefinition
- Unions []*Union
- Enums []*EnumTypeDefinition
- Extensions []*Extension
-}
-
-func (s *Schema) Resolve(name string) Type {
- return s.Types[name]
-}
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/types.go b/vendor/github.com/graph-gophers/graphql-go/types/types.go
deleted file mode 100644
index df34d08a..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/types/types.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package types
-
-import (
- "github.com/graph-gophers/graphql-go/errors"
-)
-
-// TypeName is a base building block for GraphQL type references.
-type TypeName struct {
- Ident
-}
-
-// NamedType represents a type with a name.
-//
-// http://spec.graphql.org/draft/#NamedType
-type NamedType interface {
- Type
- TypeName() string
- Description() string
-}
-
-type Ident struct {
- Name string
- Loc errors.Location
-}
-
-type Type interface {
- // Kind returns one possible GraphQL type kind. A type kind must be
- // valid as defined by the GraphQL spec.
- //
- // https://spec.graphql.org/draft/#sec-Type-Kinds
- Kind() string
-
- // String serializes a Type into a GraphQL specification format type.
- //
- // http://spec.graphql.org/draft/#sec-Serialization-Format
- String() string
-}
-
-// List represents a GraphQL ListType.
-//
-// http://spec.graphql.org/draft/#ListType
-type List struct {
- // OfType represents the inner-type of a List type.
- // For example, the List type `[Foo]` has an OfType of Foo.
- OfType Type
-}
-
-// NonNull represents a GraphQL NonNullType.
-//
-// https://spec.graphql.org/draft/#NonNullType
-type NonNull struct {
- // OfType represents the inner-type of a NonNull type.
- // For example, the NonNull type `Foo!` has an OfType of Foo.
- OfType Type
-}
-
-func (*List) Kind() string { return "LIST" }
-func (*NonNull) Kind() string { return "NON_NULL" }
-func (*TypeName) Kind() string { panic("TypeName needs to be resolved to actual type") }
-
-func (t *List) String() string { return "[" + t.OfType.String() + "]" }
-func (t *NonNull) String() string { return t.OfType.String() + "!" }
-func (*TypeName) String() string { panic("TypeName needs to be resolved to actual type") }
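
The deleted wrapper types compose into GraphQL type references by plain string concatenation: `List` brackets its inner type and `NonNull` appends `!`. Here is a self-contained mimic, with the types re-declared locally so the snippet compiles on its own; it is a sketch, not the vendored implementation.

```go
package main

import "fmt"

// Type is a minimal stand-in for the deleted Type interface.
type Type interface{ String() string }

// Named is a leaf type reference such as "Foo".
type Named string

func (n Named) String() string { return string(n) }

// List wraps an inner type as "[inner]".
type List struct{ OfType Type }

func (l List) String() string { return "[" + l.OfType.String() + "]" }

// NonNull wraps an inner type as "inner!".
type NonNull struct{ OfType Type }

func (n NonNull) String() string { return n.OfType.String() + "!" }

func main() {
	t := NonNull{OfType: List{OfType: NonNull{OfType: Named("Foo")}}}
	fmt.Println(t) // [Foo!]!
}
```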
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/union.go b/vendor/github.com/graph-gophers/graphql-go/types/union.go
deleted file mode 100644
index bb916673..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/types/union.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package types
-
-import "github.com/graph-gophers/graphql-go/errors"
-
-// Union types represent objects that could be one of a list of GraphQL object types, but provides no
-// guaranteed fields between those types.
-//
-// They also differ from interfaces in that object types declare what interfaces they implement, but
-// are not aware of what unions contain them.
-//
-// http://spec.graphql.org/draft/#sec-Unions
-type Union struct {
- Name string
- UnionMemberTypes []*ObjectTypeDefinition
- Desc string
- Directives DirectiveList
- TypeNames []string
- Loc errors.Location
-}
-
-func (*Union) Kind() string { return "UNION" }
-func (t *Union) String() string { return t.Name }
-func (t *Union) TypeName() string { return t.Name }
-func (t *Union) Description() string { return t.Desc }
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/value.go b/vendor/github.com/graph-gophers/graphql-go/types/value.go
deleted file mode 100644
index 9f8d041a..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/types/value.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package types
-
-import (
- "strconv"
- "strings"
- "text/scanner"
-
- "github.com/graph-gophers/graphql-go/errors"
-)
-
-// Value represents a literal input or literal default value in the GraphQL Specification.
-//
-// http://spec.graphql.org/draft/#sec-Input-Values
-type Value interface {
- // Deserialize transforms a GraphQL specification format literal into a Go type.
- Deserialize(vars map[string]interface{}) interface{}
-
- // String serializes a Value into a GraphQL specification format literal.
- String() string
- Location() errors.Location
-}
-
-// PrimitiveValue represents one of the following GraphQL scalars: Int, Float,
-// String, or Boolean
-type PrimitiveValue struct {
- Type rune
- Text string
- Loc errors.Location
-}
-
-func (val *PrimitiveValue) Deserialize(vars map[string]interface{}) interface{} {
- switch val.Type {
- case scanner.Int:
- value, err := strconv.ParseInt(val.Text, 10, 32)
- if err != nil {
- panic(err)
- }
- return int32(value)
-
- case scanner.Float:
- value, err := strconv.ParseFloat(val.Text, 64)
- if err != nil {
- panic(err)
- }
- return value
-
- case scanner.String:
- value, err := strconv.Unquote(val.Text)
- if err != nil {
- panic(err)
- }
- return value
-
- case scanner.Ident:
- switch val.Text {
- case "true":
- return true
- case "false":
- return false
- default:
- return val.Text
- }
-
- default:
- panic("invalid literal value")
- }
-}
-
-func (val *PrimitiveValue) String() string { return val.Text }
-func (val *PrimitiveValue) Location() errors.Location { return val.Loc }
-
-// ListValue represents a literal list Value in the GraphQL specification.
-//
-// http://spec.graphql.org/draft/#sec-List-Value
-type ListValue struct {
- Values []Value
- Loc errors.Location
-}
-
-func (val *ListValue) Deserialize(vars map[string]interface{}) interface{} {
- entries := make([]interface{}, len(val.Values))
- for i, entry := range val.Values {
- entries[i] = entry.Deserialize(vars)
- }
- return entries
-}
-
-func (val *ListValue) String() string {
- entries := make([]string, len(val.Values))
- for i, entry := range val.Values {
- entries[i] = entry.String()
- }
- return "[" + strings.Join(entries, ", ") + "]"
-}
-
-func (val *ListValue) Location() errors.Location { return val.Loc }
-
-// ObjectValue represents a literal object Value in the GraphQL specification.
-//
-// http://spec.graphql.org/draft/#sec-Object-Value
-type ObjectValue struct {
- Fields []*ObjectField
- Loc errors.Location
-}
-
-// ObjectField represents field/value pairs in a literal ObjectValue.
-type ObjectField struct {
- Name Ident
- Value Value
-}
-
-func (val *ObjectValue) Deserialize(vars map[string]interface{}) interface{} {
- fields := make(map[string]interface{}, len(val.Fields))
- for _, f := range val.Fields {
- fields[f.Name.Name] = f.Value.Deserialize(vars)
- }
- return fields
-}
-
-func (val *ObjectValue) String() string {
- entries := make([]string, 0, len(val.Fields))
- for _, f := range val.Fields {
- entries = append(entries, f.Name.Name+": "+f.Value.String())
- }
- return "{" + strings.Join(entries, ", ") + "}"
-}
-
-func (val *ObjectValue) Location() errors.Location {
- return val.Loc
-}
-
-// NullValue represents a literal `null` Value in the GraphQL specification.
-//
-// http://spec.graphql.org/draft/#sec-Null-Value
-type NullValue struct {
- Loc errors.Location
-}
-
-func (val *NullValue) Deserialize(vars map[string]interface{}) interface{} { return nil }
-func (val *NullValue) String() string { return "null" }
-func (val *NullValue) Location() errors.Location { return val.Loc }
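
In the deleted file, `PrimitiveValue.Deserialize` dispatches on the `text/scanner` token kind and applies the matching `strconv` parser, panicking on malformed literals (the document is assumed to have been validated earlier). A minimal standalone sketch of that coercion, with the function name chosen here for illustration:

```go
package main

import (
	"fmt"
	"strconv"
	"text/scanner"
)

// deserialize coerces a literal's text into a Go value based on its token kind,
// mirroring the Int/Float/String branches of the removed method.
func deserialize(kind rune, text string) interface{} {
	switch kind {
	case scanner.Int:
		v, err := strconv.ParseInt(text, 10, 32)
		if err != nil {
			panic(err)
		}
		return int32(v)
	case scanner.Float:
		v, err := strconv.ParseFloat(text, 64)
		if err != nil {
			panic(err)
		}
		return v
	case scanner.String:
		v, err := strconv.Unquote(text)
		if err != nil {
			panic(err)
		}
		return v
	default:
		panic("unsupported literal kind")
	}
}

func main() {
	fmt.Println(deserialize(scanner.Int, "42"))         // int32(42)
	fmt.Println(deserialize(scanner.String, `"hello"`)) // hello
}
```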
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/variable.go b/vendor/github.com/graph-gophers/graphql-go/types/variable.go
deleted file mode 100644
index 1a4e2a51..00000000
--- a/vendor/github.com/graph-gophers/graphql-go/types/variable.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package types
-
-import "github.com/graph-gophers/graphql-go/errors"
-
-// Variable is used in GraphQL operations to parameterize an input value.
-//
-// http://spec.graphql.org/draft/#Variable
-type Variable struct {
- Name string
- Loc errors.Location
-}
-
-func (v Variable) Deserialize(vars map[string]interface{}) interface{} { return vars[v.Name] }
-func (v Variable) String() string { return "$" + v.Name }
-func (v *Variable) Location() errors.Location { return v.Loc }
diff --git a/vendor/github.com/grokify/html-strip-tags-go/.golangci.yaml b/vendor/github.com/grokify/html-strip-tags-go/.golangci.yaml
new file mode 100644
index 00000000..112c8179
--- /dev/null
+++ b/vendor/github.com/grokify/html-strip-tags-go/.golangci.yaml
@@ -0,0 +1,13 @@
+linters:
+ enable:
+ - dogsled
+ - dupl
+ - gofmt
+ - goimports
+ - gosec
+ - misspell
+ - nakedret
+ - stylecheck
+ - unconvert
+ - unparam
+ - whitespace
diff --git a/vendor/github.com/grokify/html-strip-tags-go/.travis.yml b/vendor/github.com/grokify/html-strip-tags-go/.travis.yml
deleted file mode 100644
index 0fe1369b..00000000
--- a/vendor/github.com/grokify/html-strip-tags-go/.travis.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-language: go
-arch:
- - amd64
- - ppc64le
-
-go_import_path: github.com/grokify/html-strip-tags-go
-go:
- - "1.15.x"
- - "1.14.x"
- - "1.13.x"
- - "1.12.x"
- - "1.11.x"
- - "1.10.x"
- - "1.9.x"
- - "1.8.x"
- - "1.7.x"
- - "1.6.x"
- - "1.5.x"
- - "1.4.x"
- - "1.3.x"
- - "1.2.x"
- - tip
-matrix:
- allow_failures:
- - go: "1.4.x"
- - go: "1.3.x"
- - go: "1.2.x"
- - go: tip
-
-install:
- - go get -t -v ./...
-script:
- - go test -v ./...
-
diff --git a/vendor/github.com/grokify/html-strip-tags-go/README.md b/vendor/github.com/grokify/html-strip-tags-go/README.md
index 8a5ac946..911373fc 100644
--- a/vendor/github.com/grokify/html-strip-tags-go/README.md
+++ b/vendor/github.com/grokify/html-strip-tags-go/README.md
@@ -40,7 +40,7 @@ func main() {
[used-by-link]: https://sourcegraph.com/github.com/grokify/html-strip-tags-go?badge
[goreport-svg]: https://goreportcard.com/badge/github.com/grokify/html-strip-tags-go
[goreport-link]: https://goreportcard.com/report/github.com/grokify/html-strip-tags-go
- [build-status-svg]: https://github.com/grokify/html-strip-tags-go/workflows/build/badge.svg
+ [build-status-svg]: https://github.com/grokify/html-strip-tags-go/workflows/test/badge.svg
[build-status-link]: https://github.com/grokify/html-strip-tags-go/actions
[coverage-status-svg]: https://coveralls.io/repos/grokify/html-strip-tags-go/badge.svg?branch=master
[coverage-status-link]: https://coveralls.io/r/grokify/html-strip-tags-go?branch=master
diff --git a/vendor/github.com/grokify/html-strip-tags-go/strip.go b/vendor/github.com/grokify/html-strip-tags-go/strip.go
index 7240bcd4..030d2818 100644
--- a/vendor/github.com/grokify/html-strip-tags-go/strip.go
+++ b/vendor/github.com/grokify/html-strip-tags-go/strip.go
@@ -10,7 +10,7 @@ import (
"fmt"
"html"
"io"
- "io/ioutil"
+ "os"
"path/filepath"
"reflect"
"strings"
@@ -93,12 +93,14 @@ var htmlNormReplacementTable = []string{
//
//
var htmlNospaceReplacementTable = []string{
@@ -335,7 +337,9 @@ func (c context) mangle(templateName string) string {
// HTML5 parsing algorithm because a single token production in the HTML
// grammar may contain embedded actions in a template. For instance, the quoted
// HTML attribute produced by
-//
+//
+//
+//
// is a single token in HTML's grammar but in a template spans several nodes.
type state uint8
@@ -936,13 +940,19 @@ func newIdentCmd(identifier string, pos parse.Pos) *parse.CommandNode {
// nudge returns the context that would result from following empty string
// transitions from the input context.
// For example, parsing:
-// `
-// where {{.X}} evaluates to `javascript:...`
+//
+//
+// where {{.X}} evaluates to `javascript:...`
+//
// Discussion:
-// "ZgotmplZ" is a special value that indicates that unsafe content reached a
-// CSS or URL context at runtime. The output of the example will be
-//
-// If the data comes from a trusted source, use content types to exempt it
-// from filtering: URL(`javascript:...`).
+//
+// "ZgotmplZ" is a special value that indicates that unsafe content reached a
+// CSS or URL context at runtime. The output of the example will be
+//
+// If the data comes from a trusted source, use content types to exempt it
+// from filtering: URL(`javascript:...`).
const (
// OK indicates the lack of an error.
OK ErrorCode = iota
@@ -3523,6 +3536,7 @@ func (t *Template) Lookup(name string) *Template {
// Must is a helper that wraps a call to a function returning (*Template, error)
// and panics if the error is non-nil. It is intended for use in variable initializations
// such as
+//
// var t = template.Must(template.New("name").Parse("html"))
func Must(t *Template, err error) *Template {
if err != nil {
@@ -3554,7 +3568,7 @@ func parseFiles(t *Template, filenames ...string) (*Template, error) {
return nil, fmt.Errorf("html/template: no files named in call to ParseFiles")
}
for _, filename := range filenames {
- b, err := ioutil.ReadFile(filename)
+ b, err := os.ReadFile(filename)
if err != nil {
return nil, err
}
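
Not part of the diff, but for context on the `io/ioutil` change above: since Go 1.16, `os.ReadFile` has the same signature and behaviour as the deprecated `ioutil.ReadFile`, which is all this hunk relies on. A trivial sketch; the filename is arbitrary.

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Previously: b, err := ioutil.ReadFile("template.html")
	b, err := os.ReadFile("template.html")
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("read %d bytes\n", len(b))
}
```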
diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE
new file mode 100644
index 00000000..c33dcc7c
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md
new file mode 100644
index 00000000..444df08f
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/README.md
@@ -0,0 +1,89 @@
+# errwrap
+
+`errwrap` is a package for Go that formalizes the pattern of wrapping errors
+and checking if an error contains another error.
+
+There is a common pattern in Go of taking a returned `error` value and
+then wrapping it (such as with `fmt.Errorf`) before returning it. The problem
+with this pattern is that you completely lose the original `error` structure.
+
+Arguably the _correct_ approach is that you should make a custom structure
+implementing the `error` interface, and have the original error as a field
+on that structure, such [as this example](http://golang.org/pkg/os/#PathError).
+This is a good approach, but you have to know the entire chain of possible
+rewrapping that happens, when you might just care about one.
+
+`errwrap` formalizes this pattern (it doesn't matter what approach you use
+above) by giving a single interface for wrapping errors, checking if a specific
+error is wrapped, and extracting that error.
+
+## Installation and Docs
+
+Install using `go get github.com/hashicorp/errwrap`.
+
+Full documentation is available at
+http://godoc.org/github.com/hashicorp/errwrap
+
+## Usage
+
+#### Basic Usage
+
+Below is a very basic example of its usage:
+
+```go
+// A function that always returns an error, but wraps it, like a real
+// function might.
+func tryOpen() error {
+ _, err := os.Open("/i/dont/exist")
+ if err != nil {
+ return errwrap.Wrapf("Doesn't exist: {{err}}", err)
+ }
+
+ return nil
+}
+
+func main() {
+ err := tryOpen()
+
+ // We can use the Contains helpers to check if an error contains
+ // another error. It is safe to do this with a nil error, or with
+ // an error that doesn't even use the errwrap package.
+ if errwrap.Contains(err, "does not exist") {
+ // Do something
+ }
+ if errwrap.ContainsType(err, new(os.PathError)) {
+ // Do something
+ }
+
+ // Or we can use the associated `Get` functions to just extract
+ // a specific error. This would return nil if that specific error doesn't
+ // exist.
+ perr := errwrap.GetType(err, new(os.PathError))
+}
+```
+
+#### Custom Types
+
+If you're already making custom types that properly wrap errors, then
+you can get all the functionality of `errwraps.Contains` and such by
+implementing the `Wrapper` interface with just one function. Example:
+
+```go
+type AppError struct {
+ Code ErrorCode
+ Err error
+}
+
+func (e *AppError) WrappedErrors() []error {
+ return []error{e.Err}
+}
+```
+
+Now this works:
+
+```go
+err := &AppError{Err: fmt.Errorf("an error")}
+if errwrap.ContainsType(err, fmt.Errorf("")) {
+ // This will work!
+}
+```
diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go
new file mode 100644
index 00000000..44e368e5
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/errwrap.go
@@ -0,0 +1,178 @@
+// Package errwrap implements methods to formalize error wrapping in Go.
+//
+// All of the top-level functions that take an `error` are built to be able
+// to take any error, not just wrapped errors. This allows you to use errwrap
+// without having to type-check and type-cast everywhere.
+package errwrap
+
+import (
+ "errors"
+ "reflect"
+ "strings"
+)
+
+// WalkFunc is the callback called for Walk.
+type WalkFunc func(error)
+
+// Wrapper is an interface that can be implemented by custom types to
+// have all the Contains, Get, etc. functions in errwrap work.
+//
+// When Walk reaches a Wrapper, it will call the callback for every
+// wrapped error in addition to the wrapper itself. Since all the top-level
+// functions in errwrap use Walk, this means that all those functions work
+// with your custom type.
+type Wrapper interface {
+ WrappedErrors() []error
+}
+
+// Wrap defines that outer wraps inner, returning an error type that
+// can be cleanly used with the other methods in this package, such as
+// Contains, GetAll, etc.
+//
+// This function won't modify the error message at all (the outer message
+// will be used).
+func Wrap(outer, inner error) error {
+ return &wrappedError{
+ Outer: outer,
+ Inner: inner,
+ }
+}
+
+// Wrapf wraps an error with a formatting message. This is similar to using
+// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap
+// errors, you should replace it with this.
+//
+// format is the format of the error message. The string '{{err}}' will
+// be replaced with the original error message.
+//
+// Deprecated: Use fmt.Errorf()
+func Wrapf(format string, err error) error {
+ outerMsg := ""
+ if err != nil {
+ outerMsg = err.Error()
+ }
+
+ outer := errors.New(strings.Replace(
+ format, "{{err}}", outerMsg, -1))
+
+ return Wrap(outer, err)
+}
+
+// Contains checks if the given error contains an error with the
+// message msg. If err is not a wrapped error, this will always return
+// false unless the error itself happens to match this msg.
+func Contains(err error, msg string) bool {
+ return len(GetAll(err, msg)) > 0
+}
+
+// ContainsType checks if the given error contains an error with
+// the same concrete type as v. If err is not a wrapped error, this will
+// check the err itself.
+func ContainsType(err error, v interface{}) bool {
+ return len(GetAllType(err, v)) > 0
+}
+
+// Get is the same as GetAll but returns the deepest matching error.
+func Get(err error, msg string) error {
+ es := GetAll(err, msg)
+ if len(es) > 0 {
+ return es[len(es)-1]
+ }
+
+ return nil
+}
+
+// GetType is the same as GetAllType but returns the deepest matching error.
+func GetType(err error, v interface{}) error {
+ es := GetAllType(err, v)
+ if len(es) > 0 {
+ return es[len(es)-1]
+ }
+
+ return nil
+}
+
+// GetAll gets all the errors that might be wrapped in err with the
+// given message. The order of the errors is such that the outermost
+// matching error (the most recent wrap) is index zero, and so on.
+func GetAll(err error, msg string) []error {
+ var result []error
+
+ Walk(err, func(err error) {
+ if err.Error() == msg {
+ result = append(result, err)
+ }
+ })
+
+ return result
+}
+
+// GetAllType gets all the errors that are the same type as v.
+//
+// The order of the return value is the same as described in GetAll.
+func GetAllType(err error, v interface{}) []error {
+ var result []error
+
+ var search string
+ if v != nil {
+ search = reflect.TypeOf(v).String()
+ }
+ Walk(err, func(err error) {
+ var needle string
+ if err != nil {
+ needle = reflect.TypeOf(err).String()
+ }
+
+ if needle == search {
+ result = append(result, err)
+ }
+ })
+
+ return result
+}
+
+// Walk walks all the wrapped errors in err and calls the callback. If
+// err isn't a wrapped error, this will be called once for err. If err
+// is a wrapped error, the callback will be called for both the wrapper
+// that implements error as well as the wrapped error itself.
+func Walk(err error, cb WalkFunc) {
+ if err == nil {
+ return
+ }
+
+ switch e := err.(type) {
+ case *wrappedError:
+ cb(e.Outer)
+ Walk(e.Inner, cb)
+ case Wrapper:
+ cb(err)
+
+ for _, err := range e.WrappedErrors() {
+ Walk(err, cb)
+ }
+ case interface{ Unwrap() error }:
+ cb(err)
+ Walk(e.Unwrap(), cb)
+ default:
+ cb(err)
+ }
+}
+
+// wrappedError is an implementation of error that has both the
+// outer and inner errors.
+type wrappedError struct {
+ Outer error
+ Inner error
+}
+
+func (w *wrappedError) Error() string {
+ return w.Outer.Error()
+}
+
+func (w *wrappedError) WrappedErrors() []error {
+ return []error{w.Outer, w.Inner}
+}
+
+func (w *wrappedError) Unwrap() error {
+ return w.Inner
+}
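
The newly vendored `errwrap` adds an `Unwrap` method on its wrapped error and teaches `Walk` about `interface{ Unwrap() error }`, so it interoperates with the standard `errors` package in both directions. A small sketch of that round trip, using only the APIs defined in the file above:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/errwrap"
)

func main() {
	base := errors.New("boom")

	// errwrap -> stdlib: wrappedError.Unwrap lets errors.Is reach the inner error.
	wrapped := errwrap.Wrapf("context: {{err}}", base)
	fmt.Println(errors.Is(wrapped, base)) // true

	// stdlib -> errwrap: Walk follows Unwrap, so Contains finds the wrapped message.
	stdWrapped := fmt.Errorf("context: %w", base)
	fmt.Println(errwrap.Contains(stdWrapped, "boom")) // true
}
```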
diff --git a/vendor/github.com/hashicorp/go-hclog/.gitignore b/vendor/github.com/hashicorp/go-hclog/.gitignore
new file mode 100644
index 00000000..42cc4105
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/.gitignore
@@ -0,0 +1 @@
+.idea*
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/go-hclog/LICENSE b/vendor/github.com/hashicorp/go-hclog/LICENSE
new file mode 100644
index 00000000..9938fb50
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2017 HashiCorp, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md
new file mode 100644
index 00000000..983d44c7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/README.md
@@ -0,0 +1,149 @@
+# go-hclog
+
+[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
+
+[godocs]: https://godoc.org/github.com/hashicorp/go-hclog
+
+`go-hclog` is a package for Go that provides a simple key/value logging
+interface for use in development and production environments.
+
+Unlike the standard library `log` package, it provides log levels that let you
+reduce the amount of output to only what you need.
+
+It provides `Printf` style logging of values via `hclog.Fmt()`.
+
+It provides a human-readable output mode for use in development as well as a
+JSON output mode for production.
+
+## Stability Note
+
+This library has reached 1.0 stability. Its API can be considered solidified
+and promised through future versions.
+
+## Installation and Docs
+
+Install using `go get github.com/hashicorp/go-hclog`.
+
+Full documentation is available at
+http://godoc.org/github.com/hashicorp/go-hclog
+
+## Usage
+
+### Use the global logger
+
+```go
+hclog.Default().Info("hello world")
+```
+
+```text
+2017-07-05T16:15:55.167-0700 [INFO ] hello world
+```
+
+(Note timestamps are removed in future examples for brevity.)
+
+### Create a new logger
+
+```go
+appLogger := hclog.New(&hclog.LoggerOptions{
+ Name: "my-app",
+ Level: hclog.LevelFromString("DEBUG"),
+})
+```
+
+### Emit an Info level message with 2 key/value pairs
+
+```go
+input := "5.5"
+_, err := strconv.ParseInt(input, 10, 32)
+if err != nil {
+ appLogger.Info("Invalid input for ParseInt", "input", input, "error", err)
+}
+```
+
+```text
+... [INFO ] my-app: Invalid input for ParseInt: input=5.5 error="strconv.ParseInt: parsing "5.5": invalid syntax"
+```
+
+### Create a new Logger for a major subsystem
+
+```go
+subsystemLogger := appLogger.Named("transport")
+subsystemLogger.Info("we are transporting something")
+```
+
+```text
+... [INFO ] my-app.transport: we are transporting something
+```
+
+Notice that logs emitted by `subsystemLogger` contain `my-app.transport`,
+reflecting both the application and subsystem names.
+
+### Create a new Logger with fixed key/value pairs
+
+Using `With()` will include a specific key-value pair in all messages emitted
+by that logger.
+
+```go
+requestID := "5fb446b6-6eba-821d-df1b-cd7501b6a363"
+requestLogger := subsystemLogger.With("request", requestID)
+requestLogger.Info("we are transporting a request")
+```
+
+```text
+... [INFO ] my-app.transport: we are transporting a request: request=5fb446b6-6eba-821d-df1b-cd7501b6a363
+```
+
+This allows sub-loggers to carry context-specific values without having to
+thread them through all the callers.
+
+### Using `hclog.Fmt()`
+
+```go
+totalBandwidth := 200
+appLogger.Info("total bandwidth exceeded", "bandwidth", hclog.Fmt("%d GB/s", totalBandwidth))
+```
+
+```text
+... [INFO ] my-app: total bandwidth exceeded: bandwidth="200 GB/s"
+```
+
+### Use this with code that uses the standard library logger
+
+If you want to use the standard library's `log.Logger` interface you can wrap
+`hclog.Logger` by calling the `StandardLogger()` method. This allows you to use
+it with the familiar `Println()`, `Printf()`, etc. For example:
+
+```go
+stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{
+ InferLevels: true,
+})
+// Printf() is provided by stdlib log.Logger interface, not hclog.Logger
+stdLogger.Printf("[DEBUG] %+v", stdLogger)
+```
+
+```text
+... [DEBUG] my-app: &{mu:{state:0 sema:0} prefix: flag:0 out:0xc42000a0a0 buf:[]}
+```
+
+Alternatively, you may configure the system-wide logger:
+
+```go
+// log the standard logger from 'import "log"'
+log.SetOutput(appLogger.StandardWriter(&hclog.StandardLoggerOptions{InferLevels: true}))
+log.SetPrefix("")
+log.SetFlags(0)
+
+log.Printf("[DEBUG] %d", 42)
+```
+
+```text
+... [DEBUG] my-app: 42
+```
+
+Notice that if `appLogger` is initialized with the `INFO` log level, _and_ you
+specify `InferLevels: true`, you will not see any output here. You must change
+`appLogger` to `DEBUG` to see output. See the docs for more information.
+
+If the log lines start with a timestamp, you can use the
+`InferLevelsWithTimestamp` option to try to ignore them. Note that for
+`InferLevelsWithTimestamp` to take effect, `InferLevels` must also be set to `true`.
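+
+As an illustrative sketch (assuming the wrapped code emits lines such as
+`2023-10-01T12:00:00Z [WARN] disk space low`):
+
+```go
+stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{
+	InferLevels:              true,
+	InferLevelsWithTimestamp: true,
+})
+stdLogger.Println("2023-10-01T12:00:00Z [WARN] disk space low")
+```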
diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_unix.go b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go
new file mode 100644
index 00000000..d00816b3
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go
@@ -0,0 +1,44 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MIT
+
+//go:build !windows
+// +build !windows
+
+package hclog
+
+import (
+ "github.com/mattn/go-isatty"
+)
+
+// hasFD is used to check if the writer has an Fd value to check
+// if it's a terminal.
+type hasFD interface {
+ Fd() uintptr
+}
+
+// setColorization will mutate the values of this logger
+// to appropriately configure colorization options. On non-Windows
+// systems it disables color when the output does not support it
+// or is not a terminal.
+func (l *intLogger) setColorization(opts *LoggerOptions) {
+ if opts.Color != AutoColor {
+ return
+ }
+
+ if sc, ok := l.writer.w.(SupportsColor); ok {
+ if !sc.SupportsColor() {
+ l.headerColor = ColorOff
+ l.writer.color = ColorOff
+ }
+ return
+ }
+
+ fi, ok := l.writer.w.(hasFD)
+ if !ok {
+ return
+ }
+
+ if !isatty.IsTerminal(fi.Fd()) {
+ l.headerColor = ColorOff
+ l.writer.color = ColorOff
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_windows.go b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go
new file mode 100644
index 00000000..2c3fb9ea
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go
@@ -0,0 +1,41 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MIT
+
+//go:build windows
+// +build windows
+
+package hclog
+
+import (
+ "os"
+
+ colorable "github.com/mattn/go-colorable"
+)
+
+// setColorization will mutate the values of this logger
+// to appropriately configure colorization options. It provides
+// a wrapper to the output stream on Windows systems.
+func (l *intLogger) setColorization(opts *LoggerOptions) {
+ if opts.Color == ColorOff {
+ return
+ }
+
+ fi, ok := l.writer.w.(*os.File)
+ if !ok {
+ l.writer.color = ColorOff
+ l.headerColor = ColorOff
+ return
+ }
+
+ cfi := colorable.NewColorable(fi)
+
+ // NewColorable detects if color is possible and if it's not, then it
+ // returns the original value. So we can test if we got the original
+ // value back to know if color is possible.
+ if cfi == fi {
+ l.writer.color = ColorOff
+ l.headerColor = ColorOff
+ } else {
+ l.writer.w = cfi
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/context.go b/vendor/github.com/hashicorp/go-hclog/context.go
new file mode 100644
index 00000000..eb5aba55
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/context.go
@@ -0,0 +1,41 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MIT
+
+package hclog
+
+import (
+ "context"
+)
+
+// WithContext inserts a logger into the context and is retrievable
+// with FromContext. The optional args can be set with the same syntax as
+// Logger.With to set fields on the inserted logger. This will not modify
+// the logger argument in-place.
+func WithContext(ctx context.Context, logger Logger, args ...interface{}) context.Context {
+ // While we could call logger.With even with zero args, we have this
+ // check to avoid unnecessary allocations around creating a copy of a
+ // logger.
+ if len(args) > 0 {
+ logger = logger.With(args...)
+ }
+
+ return context.WithValue(ctx, contextKey, logger)
+}
+
+// FromContext returns a logger from the context. This will return L()
+// (the default logger) if no logger is found in the context. Therefore,
+// this will never return a nil value.
+func FromContext(ctx context.Context) Logger {
+ logger, _ := ctx.Value(contextKey).(Logger)
+ if logger == nil {
+ return L()
+ }
+
+ return logger
+}
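+
+// Illustrative usage sketch (hypothetical caller code; r is an *http.Request
+// and reqID a request identifier supplied by the caller):
+//
+//	ctx := hclog.WithContext(r.Context(), logger, "request_id", reqID)
+//	// ... later, possibly in another package ...
+//	hclog.FromContext(ctx).Info("handling request")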
+
+// Unexported new type so that our context key never collides with another.
+type contextKeyType struct{}
+
+// contextKey is the key used for the context to store the logger.
+var contextKey = contextKeyType{}
diff --git a/vendor/github.com/hashicorp/go-hclog/exclude.go b/vendor/github.com/hashicorp/go-hclog/exclude.go
new file mode 100644
index 00000000..4b73ba55
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/exclude.go
@@ -0,0 +1,74 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MIT
+
+package hclog
+
+import (
+ "regexp"
+ "strings"
+)
+
+// ExcludeByMessage provides a simple way to build a list of log messages that
+// can be queried and matched. This is meant to be used with the Exclude
+// option on Options to suppress log messages. This does not hold any mutexes
+// within itself, so normal usage would be to Add entries at setup and none after
+// Exclude is going to be called. Exclude is called with a mutex held within
+// the Logger, so that doesn't need to use a mutex. Example usage:
+//
+// f := new(ExcludeByMessage)
+// f.Add("Noisy log message text")
+// appLogger.Exclude = f.Exclude
+type ExcludeByMessage struct {
+ messages map[string]struct{}
+}
+
+// Add a message to be filtered. Do not call this after the logger has started
+// calling Exclude, due to concurrency issues.
+func (f *ExcludeByMessage) Add(msg string) {
+ if f.messages == nil {
+ f.messages = make(map[string]struct{})
+ }
+
+ f.messages[msg] = struct{}{}
+}
+
+// Exclude returns true if the given message should be excluded (filtered out).
+func (f *ExcludeByMessage) Exclude(level Level, msg string, args ...interface{}) bool {
+ _, ok := f.messages[msg]
+ return ok
+}
+
+// ExcludeByPrefix is a simple type to match a message string that has a common prefix.
+type ExcludeByPrefix string
+
+// Exclude matches a message that starts with the prefix.
+func (p ExcludeByPrefix) Exclude(level Level, msg string, args ...interface{}) bool {
+ return strings.HasPrefix(msg, string(p))
+}
+
+// ExcludeByRegexp takes a regexp and uses it to match a log message string. If it matches
+// the log entry is excluded.
+type ExcludeByRegexp struct {
+ Regexp *regexp.Regexp
+}
+
+// Exclude the log message if the message string matches the regexp
+func (e ExcludeByRegexp) Exclude(level Level, msg string, args ...interface{}) bool {
+ return e.Regexp.MatchString(msg)
+}
+
+// ExcludeFuncs is a slice of functions that will be called to see if a log entry
+// should be filtered or not. It stops calling functions once at least one returns
+// true.
+type ExcludeFuncs []func(level Level, msg string, args ...interface{}) bool
+
+// Calls each function until one of them returns true
+func (ff ExcludeFuncs) Exclude(level Level, msg string, args ...interface{}) bool {
+ for _, f := range ff {
+ if f(level, msg, args...) {
+ return true
+ }
+ }
+
+ return false
+}
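+
+// Illustrative usage sketch (hypothetical, not from the upstream source):
+// several exclusion rules can be combined and wired into LoggerOptions.Exclude:
+//
+//	opts := &LoggerOptions{
+//		Exclude: ExcludeFuncs{
+//			ExcludeByPrefix("health check: ").Exclude,
+//			ExcludeByRegexp{Regexp: regexp.MustCompile(`^noisy`)}.Exclude,
+//		}.Exclude,
+//	}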
diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go
new file mode 100644
index 00000000..a7403f59
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/global.go
@@ -0,0 +1,67 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MIT
+
+package hclog
+
+import (
+ "sync"
+ "time"
+)
+
+var (
+ protect sync.Once
+ def Logger
+
+	// DefaultOptions is used to create the Default logger. These are only
+	// read when the Default logger is created, so set them as soon as the
+ // process starts.
+ DefaultOptions = &LoggerOptions{
+ Level: DefaultLevel,
+ Output: DefaultOutput,
+ TimeFn: time.Now,
+ }
+)
+
+// Default returns a globally held logger. This can be a good starting
+// place, and then you can use .With() and .Named() to create sub-loggers
+// to be used in more specific contexts.
+// The value of the Default logger can be set via SetDefault() or by
+// changing the options in DefaultOptions.
+//
+// This method is goroutine safe, returning a global from memory, but
+// care should be used if SetDefault() is called at random times
+// in the program as that may result in race conditions and an unexpected
+// Logger being returned.
+func Default() Logger {
+ protect.Do(func() {
+ // If SetDefault was used before Default() was called, we need to
+ // detect that here.
+ if def == nil {
+ def = New(DefaultOptions)
+ }
+ })
+
+ return def
+}
+
+// L is a short alias for Default().
+func L() Logger {
+ return Default()
+}
+
+// SetDefault changes the logger to be returned by Default() and L()
+// to the one given. This allows packages to use the default logger
+// and have higher level packages change it to match the execution
+// environment. It returns any old default if there is one.
+//
+// NOTE: This is expected to be called early in the program to setup
+// a default logger. As such, it does not attempt to make itself
+// not racy with regard to the value of the default logger. Ergo
+// if it is called in goroutines, you may experience race conditions
+// with other goroutines retrieving the default logger. Basically,
+// don't do that.
+func SetDefault(log Logger) Logger {
+ old := def
+ def = log
+ return old
+}
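+
+// Illustrative usage sketch (hypothetical caller code, typically early in main):
+//
+//	hclog.SetDefault(hclog.New(&hclog.LoggerOptions{Name: "app", JSONFormat: true}))
+//	hclog.L().Info("starting")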
diff --git a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go
new file mode 100644
index 00000000..e9b1c188
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go
@@ -0,0 +1,207 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MIT
+
+package hclog
+
+import (
+ "io"
+ "log"
+ "sync"
+ "sync/atomic"
+)
+
+var _ Logger = &interceptLogger{}
+
+type interceptLogger struct {
+ Logger
+
+ mu *sync.Mutex
+ sinkCount *int32
+ Sinks map[SinkAdapter]struct{}
+}
+
+func NewInterceptLogger(opts *LoggerOptions) InterceptLogger {
+ l := newLogger(opts)
+ if l.callerOffset > 0 {
+ // extra frames for interceptLogger.{Warn,Info,Log,etc...}, and interceptLogger.log
+ l.callerOffset += 2
+ }
+ intercept := &interceptLogger{
+ Logger: l,
+ mu: new(sync.Mutex),
+ sinkCount: new(int32),
+ Sinks: make(map[SinkAdapter]struct{}),
+ }
+
+ atomic.StoreInt32(intercept.sinkCount, 0)
+
+ return intercept
+}
+
+func (i *interceptLogger) Log(level Level, msg string, args ...interface{}) {
+ i.log(level, msg, args...)
+}
+
+// log is used to make the caller stack frame lookup consistent. If Warn,Info,etc
+// all called Log then direct calls to Log would have a different stack frame
+// depth. By having all the methods call the same helper we ensure the stack
+// frame depth is the same.
+func (i *interceptLogger) log(level Level, msg string, args ...interface{}) {
+ i.Logger.Log(level, msg, args...)
+ if atomic.LoadInt32(i.sinkCount) == 0 {
+ return
+ }
+
+ i.mu.Lock()
+ defer i.mu.Unlock()
+ for s := range i.Sinks {
+ s.Accept(i.Name(), level, msg, i.retrieveImplied(args...)...)
+ }
+}
+
+// Emit the message and args at TRACE level to log and sinks
+func (i *interceptLogger) Trace(msg string, args ...interface{}) {
+ i.log(Trace, msg, args...)
+}
+
+// Emit the message and args at DEBUG level to log and sinks
+func (i *interceptLogger) Debug(msg string, args ...interface{}) {
+ i.log(Debug, msg, args...)
+}
+
+// Emit the message and args at INFO level to log and sinks
+func (i *interceptLogger) Info(msg string, args ...interface{}) {
+ i.log(Info, msg, args...)
+}
+
+// Emit the message and args at WARN level to log and sinks
+func (i *interceptLogger) Warn(msg string, args ...interface{}) {
+ i.log(Warn, msg, args...)
+}
+
+// Emit the message and args at ERROR level to log and sinks
+func (i *interceptLogger) Error(msg string, args ...interface{}) {
+ i.log(Error, msg, args...)
+}
+
+func (i *interceptLogger) retrieveImplied(args ...interface{}) []interface{} {
+ top := i.Logger.ImpliedArgs()
+
+ cp := make([]interface{}, len(top)+len(args))
+ copy(cp, top)
+ copy(cp[len(top):], args)
+
+ return cp
+}
+
+// Create a new sub-Logger that has a name descending from the current name.
+// This is used to create a subsystem specific Logger.
+// Registered sinks will subscribe to these messages as well.
+func (i *interceptLogger) Named(name string) Logger {
+ return i.NamedIntercept(name)
+}
+
+// Create a new sub-Logger with an explicit name. This ignores the current
+// name. This is used to create a standalone logger that doesn't fall
+// within the normal hierarchy. Registered sinks will subscribe
+// to these messages as well.
+func (i *interceptLogger) ResetNamed(name string) Logger {
+ return i.ResetNamedIntercept(name)
+}
+
+// Create a new sub-Logger that has a name descending from the current name.
+// This is used to create a subsystem specific Logger.
+// Registered sinks will subscribe to these messages as well.
+func (i *interceptLogger) NamedIntercept(name string) InterceptLogger {
+ var sub interceptLogger
+
+ sub = *i
+ sub.Logger = i.Logger.Named(name)
+ return &sub
+}
+
+// Create a new sub-Logger with an explicit name. This ignores the current
+// name. This is used to create a standalone logger that doesn't fall
+// within the normal hierarchy. Registered sinks will subscribe
+// to these messages as well.
+func (i *interceptLogger) ResetNamedIntercept(name string) InterceptLogger {
+ var sub interceptLogger
+
+ sub = *i
+ sub.Logger = i.Logger.ResetNamed(name)
+ return &sub
+}
+
+// Return a sub-Logger for which every emitted log message will contain
+// the given key/value pairs. This is used to create a context specific
+// Logger.
+func (i *interceptLogger) With(args ...interface{}) Logger {
+ var sub interceptLogger
+
+ sub = *i
+
+ sub.Logger = i.Logger.With(args...)
+
+ return &sub
+}
+
+// RegisterSink attaches a SinkAdapter to the interceptLogger's sinks.
+func (i *interceptLogger) RegisterSink(sink SinkAdapter) {
+ i.mu.Lock()
+ defer i.mu.Unlock()
+
+ i.Sinks[sink] = struct{}{}
+
+ atomic.AddInt32(i.sinkCount, 1)
+}
+
+// DeregisterSink removes a SinkAdapter from the interceptLogger's sinks.
+func (i *interceptLogger) DeregisterSink(sink SinkAdapter) {
+ i.mu.Lock()
+ defer i.mu.Unlock()
+
+ delete(i.Sinks, sink)
+
+ atomic.AddInt32(i.sinkCount, -1)
+}
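+
+// Illustrative usage sketch (hypothetical; debugFile is any io.Writer supplied
+// by the caller): a sink can capture verbose output while the root logger
+// stays at a higher level:
+//
+//	il := NewInterceptLogger(&LoggerOptions{Name: "app", Level: Info})
+//	sink := NewSinkAdapter(&LoggerOptions{Level: Trace, Output: debugFile})
+//	il.RegisterSink(sink)
+//	defer il.DeregisterSink(sink)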
+
+func (i *interceptLogger) StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger {
+ return i.StandardLogger(opts)
+}
+
+func (i *interceptLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
+ if opts == nil {
+ opts = &StandardLoggerOptions{}
+ }
+
+ return log.New(i.StandardWriter(opts), "", 0)
+}
+
+func (i *interceptLogger) StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer {
+ return i.StandardWriter(opts)
+}
+
+func (i *interceptLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
+ return &stdlogAdapter{
+ log: i,
+ inferLevels: opts.InferLevels,
+ inferLevelsWithTimestamp: opts.InferLevelsWithTimestamp,
+ forceLevel: opts.ForceLevel,
+ }
+}
+
+func (i *interceptLogger) ResetOutput(opts *LoggerOptions) error {
+ if or, ok := i.Logger.(OutputResettable); ok {
+ return or.ResetOutput(opts)
+ } else {
+ return nil
+ }
+}
+
+func (i *interceptLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error {
+ if or, ok := i.Logger.(OutputResettable); ok {
+ return or.ResetOutputWithFlush(opts, flushable)
+ } else {
+ return nil
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go
new file mode 100644
index 00000000..104d82ff
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go
@@ -0,0 +1,1001 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MIT
+
+package hclog
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/fatih/color"
+)
+
+// TimeFormat is the time format to use for plain (non-JSON) output.
+// This is a version of RFC3339 that contains millisecond precision.
+const TimeFormat = "2006-01-02T15:04:05.000Z0700"
+
+// TimeFormatJSON is the time format to use for JSON output.
+// This is a version of RFC3339 that contains microsecond precision.
+const TimeFormatJSON = "2006-01-02T15:04:05.000000Z07:00"
+
+// errJsonUnsupportedTypeMsg is included in log json entries, if an arg cannot be serialized to json
+const errJsonUnsupportedTypeMsg = "logging contained values that don't serialize to json"
+
+var (
+ _levelToBracket = map[Level]string{
+ Debug: "[DEBUG]",
+ Trace: "[TRACE]",
+ Info: "[INFO] ",
+ Warn: "[WARN] ",
+ Error: "[ERROR]",
+ }
+
+ _levelToColor = map[Level]*color.Color{
+ Debug: color.New(color.FgHiWhite),
+ Trace: color.New(color.FgHiGreen),
+ Info: color.New(color.FgHiBlue),
+ Warn: color.New(color.FgHiYellow),
+ Error: color.New(color.FgHiRed),
+ }
+
+ faintBoldColor = color.New(color.Faint, color.Bold)
+ faintColor = color.New(color.Faint)
+ faintMultiLinePrefix string
+ faintFieldSeparator string
+ faintFieldSeparatorWithNewLine string
+)
+
+func init() {
+	// Force all the colors to be enabled because we do our own detection of color usage.
+ for _, c := range _levelToColor {
+ c.EnableColor()
+ }
+
+ faintBoldColor.EnableColor()
+ faintColor.EnableColor()
+
+ faintMultiLinePrefix = faintColor.Sprint(" | ")
+ faintFieldSeparator = faintColor.Sprint("=")
+ faintFieldSeparatorWithNewLine = faintColor.Sprint("=\n")
+}
+
+// Make sure that intLogger is a Logger
+var _ Logger = &intLogger{}
+
+// intLogger is an internal logger implementation. Internal in that it is
+// defined entirely by this package.
+type intLogger struct {
+ json bool
+ callerOffset int
+ name string
+ timeFormat string
+ timeFn TimeFunction
+ disableTime bool
+
+ // This is an interface so that it's shared by any derived loggers, since
+ // those derived loggers share the bufio.Writer as well.
+ mutex Locker
+ writer *writer
+ level *int32
+
+ // The value of curEpoch when our level was set
+ setEpoch uint64
+
+ // The value of curEpoch the last time we performed the level sync process
+ ownEpoch uint64
+
+	// Shared amongst all the loggers created in this hierarchy, used to determine
+ // if the level sync process should be run by comparing it with ownEpoch
+ curEpoch *uint64
+
+ // The logger this one was created from. Only set when syncParentLevel is set
+ parent *intLogger
+
+ headerColor ColorOption
+ fieldColor ColorOption
+
+ implied []interface{}
+
+ exclude func(level Level, msg string, args ...interface{}) bool
+
+ // create subloggers with their own level setting
+ independentLevels bool
+ syncParentLevel bool
+
+ subloggerHook func(sub Logger) Logger
+}
+
+// New returns a configured logger.
+func New(opts *LoggerOptions) Logger {
+ return newLogger(opts)
+}
+
+// NewSinkAdapter returns a SinkAdapter with configured settings
+// defined by LoggerOptions
+func NewSinkAdapter(opts *LoggerOptions) SinkAdapter {
+ l := newLogger(opts)
+ if l.callerOffset > 0 {
+ // extra frames for interceptLogger.{Warn,Info,Log,etc...}, and SinkAdapter.Accept
+ l.callerOffset += 2
+ }
+ return l
+}
+
+func newLogger(opts *LoggerOptions) *intLogger {
+ if opts == nil {
+ opts = &LoggerOptions{}
+ }
+
+ output := opts.Output
+ if output == nil {
+ output = DefaultOutput
+ }
+
+ level := opts.Level
+ if level == NoLevel {
+ level = DefaultLevel
+ }
+
+ mutex := opts.Mutex
+ if mutex == nil {
+ mutex = new(sync.Mutex)
+ }
+
+ var (
+ primaryColor = ColorOff
+ headerColor = ColorOff
+ fieldColor = ColorOff
+ )
+ switch {
+ case opts.ColorHeaderOnly:
+ headerColor = opts.Color
+ case opts.ColorHeaderAndFields:
+ fieldColor = opts.Color
+ headerColor = opts.Color
+ default:
+ primaryColor = opts.Color
+ }
+
+ l := &intLogger{
+ json: opts.JSONFormat,
+ name: opts.Name,
+ timeFormat: TimeFormat,
+ timeFn: time.Now,
+ disableTime: opts.DisableTime,
+ mutex: mutex,
+ writer: newWriter(output, primaryColor),
+ level: new(int32),
+ curEpoch: new(uint64),
+ exclude: opts.Exclude,
+ independentLevels: opts.IndependentLevels,
+ syncParentLevel: opts.SyncParentLevel,
+ headerColor: headerColor,
+ fieldColor: fieldColor,
+ subloggerHook: opts.SubloggerHook,
+ }
+ if opts.IncludeLocation {
+ l.callerOffset = offsetIntLogger + opts.AdditionalLocationOffset
+ }
+
+ if l.json {
+ l.timeFormat = TimeFormatJSON
+ }
+ if opts.TimeFn != nil {
+ l.timeFn = opts.TimeFn
+ }
+ if opts.TimeFormat != "" {
+ l.timeFormat = opts.TimeFormat
+ }
+
+ if l.subloggerHook == nil {
+ l.subloggerHook = identityHook
+ }
+
+ l.setColorization(opts)
+
+ atomic.StoreInt32(l.level, int32(level))
+
+ return l
+}
+
+func identityHook(logger Logger) Logger {
+ return logger
+}
+
+// offsetIntLogger is the stack frame offset in the call stack for the caller to
+// one of the Warn, Info, Log, etc methods.
+const offsetIntLogger = 3
+
+// Log a message and a set of key/value pairs if the given level is at
+// or more severe than the threshold configured in the Logger.
+func (l *intLogger) log(name string, level Level, msg string, args ...interface{}) {
+ if level < l.GetLevel() {
+ return
+ }
+
+ t := l.timeFn()
+
+ l.mutex.Lock()
+ defer l.mutex.Unlock()
+
+ if l.exclude != nil && l.exclude(level, msg, args...) {
+ return
+ }
+
+ if l.json {
+ l.logJSON(t, name, level, msg, args...)
+ } else {
+ l.logPlain(t, name, level, msg, args...)
+ }
+
+ l.writer.Flush(level)
+}
+
+// Cleanup a path by returning the last 2 segments of the path only.
+func trimCallerPath(path string) string {
+ // lovely borrowed from zap
+ // nb. To make sure we trim the path correctly on Windows too, we
+ // counter-intuitively need to use '/' and *not* os.PathSeparator here,
+ // because the path given originates from Go stdlib, specifically
+ // runtime.Caller() which (as of Mar/17) returns forward slashes even on
+ // Windows.
+ //
+ // See https://github.com/golang/go/issues/3335
+ // and https://github.com/golang/go/issues/18151
+ //
+ // for discussion on the issue on Go side.
+
+ // Find the last separator.
+ idx := strings.LastIndexByte(path, '/')
+ if idx == -1 {
+ return path
+ }
+
+ // Find the penultimate separator.
+ idx = strings.LastIndexByte(path[:idx], '/')
+ if idx == -1 {
+ return path
+ }
+
+ return path[idx+1:]
+}
+
+// isNormal indicates if the rune is one allowed to exist as an unquoted
+// string value. This is a subset of ASCII, `-` through `~`.
+func isNormal(r rune) bool {
+ return 0x2D <= r && r <= 0x7E // - through ~
+}
+
+// needsQuoting returns false if all the runes in string are normal, according
+// to isNormal
+func needsQuoting(str string) bool {
+ for _, r := range str {
+ if !isNormal(r) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// logPlain is the non-JSON logging format function which writes directly
+// to the underlying writer the logger was initialized with.
+//
+// If the logger was initialized with a color function, it also handles
+// applying the color to the log message.
+//
+// Color Options
+// 1. No color.
+// 2. Color the whole log line, based on the level.
+// 3. Color only the header (level) part of the log line.
+// 4. Color both the header and fields of the log line.
+func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, args ...interface{}) {
+
+ if !l.disableTime {
+ l.writer.WriteString(t.Format(l.timeFormat))
+ l.writer.WriteByte(' ')
+ }
+
+ s, ok := _levelToBracket[level]
+ if ok {
+ if l.headerColor != ColorOff {
+ color := _levelToColor[level]
+ color.Fprint(l.writer, s)
+ } else {
+ l.writer.WriteString(s)
+ }
+ } else {
+ l.writer.WriteString("[?????]")
+ }
+
+ if l.callerOffset > 0 {
+ if _, file, line, ok := runtime.Caller(l.callerOffset); ok {
+ l.writer.WriteByte(' ')
+ l.writer.WriteString(trimCallerPath(file))
+ l.writer.WriteByte(':')
+ l.writer.WriteString(strconv.Itoa(line))
+ l.writer.WriteByte(':')
+ }
+ }
+
+ l.writer.WriteByte(' ')
+
+ if name != "" {
+ l.writer.WriteString(name)
+ if msg != "" {
+ l.writer.WriteString(": ")
+ l.writer.WriteString(msg)
+ }
+ } else if msg != "" {
+ l.writer.WriteString(msg)
+ }
+
+ args = append(l.implied, args...)
+
+ var stacktrace CapturedStacktrace
+
+ if len(args) > 0 {
+ if len(args)%2 != 0 {
+ cs, ok := args[len(args)-1].(CapturedStacktrace)
+ if ok {
+ args = args[:len(args)-1]
+ stacktrace = cs
+ } else {
+ extra := args[len(args)-1]
+ args = append(args[:len(args)-1], MissingKey, extra)
+ }
+ }
+
+ l.writer.WriteByte(':')
+
+ // Handle the field arguments, which come in pairs (key=val).
+ FOR:
+ for i := 0; i < len(args); i = i + 2 {
+ var (
+ key string
+ val string
+ raw bool
+ )
+
+ // Convert the field value to a string.
+ switch st := args[i+1].(type) {
+ case string:
+ val = st
+ if st == "" {
+ val = `""`
+ raw = true
+ }
+ case int:
+ val = strconv.FormatInt(int64(st), 10)
+ case int64:
+ val = strconv.FormatInt(int64(st), 10)
+ case int32:
+ val = strconv.FormatInt(int64(st), 10)
+ case int16:
+ val = strconv.FormatInt(int64(st), 10)
+ case int8:
+ val = strconv.FormatInt(int64(st), 10)
+ case uint:
+ val = strconv.FormatUint(uint64(st), 10)
+ case uint64:
+ val = strconv.FormatUint(uint64(st), 10)
+ case uint32:
+ val = strconv.FormatUint(uint64(st), 10)
+ case uint16:
+ val = strconv.FormatUint(uint64(st), 10)
+ case uint8:
+ val = strconv.FormatUint(uint64(st), 10)
+ case Hex:
+ val = "0x" + strconv.FormatUint(uint64(st), 16)
+ case Octal:
+ val = "0" + strconv.FormatUint(uint64(st), 8)
+ case Binary:
+ val = "0b" + strconv.FormatUint(uint64(st), 2)
+ case CapturedStacktrace:
+ stacktrace = st
+ continue FOR
+ case Format:
+ val = fmt.Sprintf(st[0].(string), st[1:]...)
+ case Quote:
+ raw = true
+ val = strconv.Quote(string(st))
+ default:
+ v := reflect.ValueOf(st)
+ if v.Kind() == reflect.Slice {
+ val = l.renderSlice(v)
+ raw = true
+ } else {
+ val = fmt.Sprintf("%v", st)
+ }
+ }
+
+ // Convert the field key to a string.
+ switch st := args[i].(type) {
+ case string:
+ key = st
+ default:
+ key = fmt.Sprintf("%s", st)
+ }
+
+ // Optionally apply the ANSI "faint" and "bold"
+ // SGR values to the key.
+ if l.fieldColor != ColorOff {
+ key = faintBoldColor.Sprint(key)
+ }
+
+ // Values may contain multiple lines, and that format
+ // is preserved, with each line prefixed with a " | "
+ // to show it's part of a collection of lines.
+ //
+ // Values may also need quoting, if not all the runes
+ // in the value string are "normal", like if they
+ // contain ANSI escape sequences.
+ if strings.Contains(val, "\n") {
+ l.writer.WriteString("\n ")
+ l.writer.WriteString(key)
+ if l.fieldColor != ColorOff {
+ l.writer.WriteString(faintFieldSeparatorWithNewLine)
+ writeIndent(l.writer, val, faintMultiLinePrefix)
+ } else {
+ l.writer.WriteString("=\n")
+ writeIndent(l.writer, val, " | ")
+ }
+ l.writer.WriteString(" ")
+ } else if !raw && needsQuoting(val) {
+ l.writer.WriteByte(' ')
+ l.writer.WriteString(key)
+ if l.fieldColor != ColorOff {
+ l.writer.WriteString(faintFieldSeparator)
+ } else {
+ l.writer.WriteByte('=')
+ }
+ l.writer.WriteByte('"')
+ writeEscapedForOutput(l.writer, val, true)
+ l.writer.WriteByte('"')
+ } else {
+ l.writer.WriteByte(' ')
+ l.writer.WriteString(key)
+ if l.fieldColor != ColorOff {
+ l.writer.WriteString(faintFieldSeparator)
+ } else {
+ l.writer.WriteByte('=')
+ }
+ l.writer.WriteString(val)
+ }
+ }
+ }
+
+ l.writer.WriteString("\n")
+
+ if stacktrace != "" {
+ l.writer.WriteString(string(stacktrace))
+ l.writer.WriteString("\n")
+ }
+}
+
+func writeIndent(w *writer, str string, indent string) {
+ for {
+ nl := strings.IndexByte(str, "\n"[0])
+ if nl == -1 {
+ if str != "" {
+ w.WriteString(indent)
+ writeEscapedForOutput(w, str, false)
+ w.WriteString("\n")
+ }
+ return
+ }
+
+ w.WriteString(indent)
+ writeEscapedForOutput(w, str[:nl], false)
+ w.WriteString("\n")
+ str = str[nl+1:]
+ }
+}
+
+func needsEscaping(str string) bool {
+ for _, b := range str {
+ if !unicode.IsPrint(b) || b == '"' {
+ return true
+ }
+ }
+
+ return false
+}
+
+const (
+ lowerhex = "0123456789abcdef"
+)
+
+var bufPool = sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+}
+
+func writeEscapedForOutput(w io.Writer, str string, escapeQuotes bool) {
+ if !needsEscaping(str) {
+ w.Write([]byte(str))
+ return
+ }
+
+ bb := bufPool.Get().(*bytes.Buffer)
+ bb.Reset()
+
+ defer bufPool.Put(bb)
+
+ for _, r := range str {
+ if escapeQuotes && r == '"' {
+ bb.WriteString(`\"`)
+ } else if unicode.IsPrint(r) {
+ bb.WriteRune(r)
+ } else {
+ switch r {
+ case '\a':
+ bb.WriteString(`\a`)
+ case '\b':
+ bb.WriteString(`\b`)
+ case '\f':
+ bb.WriteString(`\f`)
+ case '\n':
+ bb.WriteString(`\n`)
+ case '\r':
+ bb.WriteString(`\r`)
+ case '\t':
+ bb.WriteString(`\t`)
+ case '\v':
+ bb.WriteString(`\v`)
+ default:
+ switch {
+ case r < ' ':
+ bb.WriteString(`\x`)
+ bb.WriteByte(lowerhex[byte(r)>>4])
+ bb.WriteByte(lowerhex[byte(r)&0xF])
+ case !utf8.ValidRune(r):
+ r = 0xFFFD
+ fallthrough
+ case r < 0x10000:
+ bb.WriteString(`\u`)
+ for s := 12; s >= 0; s -= 4 {
+ bb.WriteByte(lowerhex[r>>uint(s)&0xF])
+ }
+ default:
+ bb.WriteString(`\U`)
+ for s := 28; s >= 0; s -= 4 {
+ bb.WriteByte(lowerhex[r>>uint(s)&0xF])
+ }
+ }
+ }
+ }
+ }
+
+ w.Write(bb.Bytes())
+}
+
+func (l *intLogger) renderSlice(v reflect.Value) string {
+ var buf bytes.Buffer
+
+ buf.WriteRune('[')
+
+ for i := 0; i < v.Len(); i++ {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+
+ sv := v.Index(i)
+
+ var val string
+
+ switch sv.Kind() {
+ case reflect.String:
+ val = strconv.Quote(sv.String())
+ case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64:
+ val = strconv.FormatInt(sv.Int(), 10)
+ case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ val = strconv.FormatUint(sv.Uint(), 10)
+ default:
+ val = fmt.Sprintf("%v", sv.Interface())
+ if strings.ContainsAny(val, " \t\n\r") {
+ val = strconv.Quote(val)
+ }
+ }
+
+ buf.WriteString(val)
+ }
+
+ buf.WriteRune(']')
+
+ return buf.String()
+}
+
+// JSON logging function
+func (l *intLogger) logJSON(t time.Time, name string, level Level, msg string, args ...interface{}) {
+ vals := l.jsonMapEntry(t, name, level, msg)
+ args = append(l.implied, args...)
+
+ if len(args) > 0 {
+ if len(args)%2 != 0 {
+ cs, ok := args[len(args)-1].(CapturedStacktrace)
+ if ok {
+ args = args[:len(args)-1]
+ vals["stacktrace"] = cs
+ } else {
+ extra := args[len(args)-1]
+ args = append(args[:len(args)-1], MissingKey, extra)
+ }
+ }
+
+ for i := 0; i < len(args); i = i + 2 {
+ val := args[i+1]
+ switch sv := val.(type) {
+ case error:
+ // Check if val is of type error. If error type doesn't
+ // implement json.Marshaler or encoding.TextMarshaler
+ // then set val to err.Error() so that it gets marshaled
+ switch sv.(type) {
+ case json.Marshaler, encoding.TextMarshaler:
+ default:
+ val = sv.Error()
+ }
+ case Format:
+ val = fmt.Sprintf(sv[0].(string), sv[1:]...)
+ }
+
+ var key string
+
+ switch st := args[i].(type) {
+ case string:
+ key = st
+ default:
+ key = fmt.Sprintf("%s", st)
+ }
+ vals[key] = val
+ }
+ }
+
+ err := json.NewEncoder(l.writer).Encode(vals)
+ if err != nil {
+ if _, ok := err.(*json.UnsupportedTypeError); ok {
+ plainVal := l.jsonMapEntry(t, name, level, msg)
+ plainVal["@warn"] = errJsonUnsupportedTypeMsg
+
+ json.NewEncoder(l.writer).Encode(plainVal)
+ }
+ }
+}
+
+func (l intLogger) jsonMapEntry(t time.Time, name string, level Level, msg string) map[string]interface{} {
+ vals := map[string]interface{}{
+ "@message": msg,
+ }
+ if !l.disableTime {
+ vals["@timestamp"] = t.Format(l.timeFormat)
+ }
+
+ var levelStr string
+ switch level {
+ case Error:
+ levelStr = "error"
+ case Warn:
+ levelStr = "warn"
+ case Info:
+ levelStr = "info"
+ case Debug:
+ levelStr = "debug"
+ case Trace:
+ levelStr = "trace"
+ default:
+ levelStr = "all"
+ }
+
+ vals["@level"] = levelStr
+
+ if name != "" {
+ vals["@module"] = name
+ }
+
+ if l.callerOffset > 0 {
+ if _, file, line, ok := runtime.Caller(l.callerOffset + 1); ok {
+ vals["@caller"] = fmt.Sprintf("%s:%d", file, line)
+ }
+ }
+ return vals
+}
+
+// Emit the message and args at the provided level
+func (l *intLogger) Log(level Level, msg string, args ...interface{}) {
+ l.log(l.Name(), level, msg, args...)
+}
+
+// Emit the message and args at DEBUG level
+func (l *intLogger) Debug(msg string, args ...interface{}) {
+ l.log(l.Name(), Debug, msg, args...)
+}
+
+// Emit the message and args at TRACE level
+func (l *intLogger) Trace(msg string, args ...interface{}) {
+ l.log(l.Name(), Trace, msg, args...)
+}
+
+// Emit the message and args at INFO level
+func (l *intLogger) Info(msg string, args ...interface{}) {
+ l.log(l.Name(), Info, msg, args...)
+}
+
+// Emit the message and args at WARN level
+func (l *intLogger) Warn(msg string, args ...interface{}) {
+ l.log(l.Name(), Warn, msg, args...)
+}
+
+// Emit the message and args at ERROR level
+func (l *intLogger) Error(msg string, args ...interface{}) {
+ l.log(l.Name(), Error, msg, args...)
+}
+
+// Indicate that the logger would emit TRACE level logs
+func (l *intLogger) IsTrace() bool {
+ return l.GetLevel() == Trace
+}
+
+// Indicate that the logger would emit DEBUG level logs
+func (l *intLogger) IsDebug() bool {
+ return l.GetLevel() <= Debug
+}
+
+// Indicate that the logger would emit INFO level logs
+func (l *intLogger) IsInfo() bool {
+ return l.GetLevel() <= Info
+}
+
+// Indicate that the logger would emit WARN level logs
+func (l *intLogger) IsWarn() bool {
+ return l.GetLevel() <= Warn
+}
+
+// Indicate that the logger would emit ERROR level logs
+func (l *intLogger) IsError() bool {
+ return l.GetLevel() <= Error
+}
+
+const MissingKey = "EXTRA_VALUE_AT_END"
+
+// Return a sub-Logger for which every emitted log message will contain
+// the given key/value pairs. This is used to create a context specific
+// Logger.
+func (l *intLogger) With(args ...interface{}) Logger {
+ var extra interface{}
+
+ if len(args)%2 != 0 {
+ extra = args[len(args)-1]
+ args = args[:len(args)-1]
+ }
+
+ sl := l.copy()
+
+ result := make(map[string]interface{}, len(l.implied)+len(args))
+ keys := make([]string, 0, len(l.implied)+len(args))
+
+ // Read existing args, store map and key for consistent sorting
+ for i := 0; i < len(l.implied); i += 2 {
+ key := l.implied[i].(string)
+ keys = append(keys, key)
+ result[key] = l.implied[i+1]
+ }
+ // Read new args, store map and key for consistent sorting
+ for i := 0; i < len(args); i += 2 {
+ key := args[i].(string)
+ _, exists := result[key]
+ if !exists {
+ keys = append(keys, key)
+ }
+ result[key] = args[i+1]
+ }
+
+ // Sort keys to be consistent
+ sort.Strings(keys)
+
+ sl.implied = make([]interface{}, 0, len(l.implied)+len(args))
+ for _, k := range keys {
+ sl.implied = append(sl.implied, k)
+ sl.implied = append(sl.implied, result[k])
+ }
+
+ if extra != nil {
+ sl.implied = append(sl.implied, MissingKey, extra)
+ }
+
+ return l.subloggerHook(sl)
+}
+
+// Create a new sub-Logger that has a name descending from the current name.
+// This is used to create a subsystem specific Logger.
+func (l *intLogger) Named(name string) Logger {
+ sl := l.copy()
+
+ if sl.name != "" {
+ sl.name = sl.name + "." + name
+ } else {
+ sl.name = name
+ }
+
+ return l.subloggerHook(sl)
+}
+
+// Create a new sub-Logger with an explicit name. This ignores the current
+// name. This is used to create a standalone logger that doesn't fall
+// within the normal hierarchy.
+func (l *intLogger) ResetNamed(name string) Logger {
+ sl := l.copy()
+
+ sl.name = name
+
+ return l.subloggerHook(sl)
+}
+
+func (l *intLogger) ResetOutput(opts *LoggerOptions) error {
+ if opts.Output == nil {
+ return errors.New("given output is nil")
+ }
+
+ l.mutex.Lock()
+ defer l.mutex.Unlock()
+
+ return l.resetOutput(opts)
+}
+
+func (l *intLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error {
+ if opts.Output == nil {
+ return errors.New("given output is nil")
+ }
+ if flushable == nil {
+ return errors.New("flushable is nil")
+ }
+
+ l.mutex.Lock()
+ defer l.mutex.Unlock()
+
+ if err := flushable.Flush(); err != nil {
+ return err
+ }
+
+ return l.resetOutput(opts)
+}
+
+func (l *intLogger) resetOutput(opts *LoggerOptions) error {
+ l.writer = newWriter(opts.Output, opts.Color)
+ l.setColorization(opts)
+ return nil
+}
+
+// Update the logging level on-the-fly. This will affect all subloggers as
+// well.
+func (l *intLogger) SetLevel(level Level) {
+ if !l.syncParentLevel {
+ atomic.StoreInt32(l.level, int32(level))
+ return
+ }
+
+ nsl := new(int32)
+ *nsl = int32(level)
+
+ l.level = nsl
+
+ l.ownEpoch = atomic.AddUint64(l.curEpoch, 1)
+ l.setEpoch = l.ownEpoch
+}
+
+func (l *intLogger) searchLevelPtr() *int32 {
+ p := l.parent
+
+ ptr := l.level
+
+ max := l.setEpoch
+
+ for p != nil {
+ if p.setEpoch > max {
+ max = p.setEpoch
+ ptr = p.level
+ }
+
+ p = p.parent
+ }
+
+ return ptr
+}
+
+// Returns the current level
+func (l *intLogger) GetLevel() Level {
+ // We perform the loads immediately to keep the CPU pipeline busy, which
+ // effectively makes the second load cost nothing. Once loaded into registers
+ // the comparison returns the already loaded value. The comparison is almost
+ // always true, so the branch predictor should hit consistently with it.
+ var (
+ curEpoch = atomic.LoadUint64(l.curEpoch)
+ level = Level(atomic.LoadInt32(l.level))
+ own = l.ownEpoch
+ )
+
+ if curEpoch == own {
+ return level
+ }
+
+ // Perform the level sync process. We'll avoid doing this next time by seeing the
+ // epoch as current.
+
+ ptr := l.searchLevelPtr()
+ l.level = ptr
+ l.ownEpoch = curEpoch
+
+ return Level(atomic.LoadInt32(ptr))
+}
+
+// Create a *log.Logger that will send its data through this Logger. This
+// allows packages that expect to be using the standard library log to actually
+// use this logger.
+func (l *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
+ if opts == nil {
+ opts = &StandardLoggerOptions{}
+ }
+
+ return log.New(l.StandardWriter(opts), "", 0)
+}
+
+func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
+ newLog := *l
+ if l.callerOffset > 0 {
+ // the stack is
+ // logger.printf() -> l.Output() ->l.out.writer(hclog:stdlogAdaptor.write) -> hclog:stdlogAdaptor.dispatch()
+ // So plus 4.
+ newLog.callerOffset = l.callerOffset + 4
+ }
+ return &stdlogAdapter{
+ log: &newLog,
+ inferLevels: opts.InferLevels,
+ inferLevelsWithTimestamp: opts.InferLevelsWithTimestamp,
+ forceLevel: opts.ForceLevel,
+ }
+}
+
+// Accept implements the SinkAdapter interface
+func (i *intLogger) Accept(name string, level Level, msg string, args ...interface{}) {
+ i.log(name, level, msg, args...)
+}
+
+// ImpliedArgs returns the logger's implied args
+func (i *intLogger) ImpliedArgs() []interface{} {
+ return i.implied
+}
+
+// Name returns the logger's name
+func (i *intLogger) Name() string {
+ return i.name
+}
+
+// copy returns a shallow copy of the intLogger, replacing the level pointer
+// when necessary
+func (l *intLogger) copy() *intLogger {
+ sl := *l
+
+ if l.independentLevels {
+ sl.level = new(int32)
+ *sl.level = *l.level
+ } else if l.syncParentLevel {
+ sl.parent = l
+ }
+
+ return &sl
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go
new file mode 100644
index 00000000..d7806fb5
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/logger.go
@@ -0,0 +1,412 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MIT
+
+package hclog
+
+import (
+ "io"
+ "log"
+ "os"
+ "strings"
+ "time"
+)
+
+var (
+ // DefaultOutput is used as the default log output.
+ DefaultOutput io.Writer = os.Stderr
+
+ // DefaultLevel is used as the default log level.
+ DefaultLevel = Info
+)
+
+// Level represents a log level.
+type Level int32
+
+const (
+ // NoLevel is a special level used to indicate that no level has been
+ // set and allow for a default to be used.
+ NoLevel Level = 0
+
+ // Trace is the most verbose level. Intended to be used for the tracing
+ // of actions in code, such as function enters/exits, etc.
+ Trace Level = 1
+
+ // Debug information for programmer low-level analysis.
+ Debug Level = 2
+
+ // Info information about steady state operations.
+ Info Level = 3
+
+ // Warn information about rare but handled events.
+ Warn Level = 4
+
+ // Error information about unrecoverable events.
+ Error Level = 5
+
+ // Off disables all logging output.
+ Off Level = 6
+)
+
+// Format is a simple convenience type for when formatting is required. When
+// processing a value of this type, the logger automatically treats the first
+// argument as a Printf formatting string and passes the rest as the values
+// to be formatted. For example: L.Info(Fmt{"%d beans/day", beans}).
+type Format []interface{}
+
+// Fmt returns a Format type. This is a convenience function for creating a Format
+// type.
+func Fmt(str string, args ...interface{}) Format {
+ return append(Format{str}, args...)
+}
+
+// A simple shortcut to format numbers in hex when displayed with the normal
+// text output. For example: L.Info("header value", Hex(17))
+type Hex int
+
+// A simple shortcut to format numbers in octal when displayed with the normal
+// text output. For example: L.Info("perms", Octal(17))
+type Octal int
+
+// A simple shortcut to format numbers in binary when displayed with the normal
+// text output. For example: L.Info("bits", Binary(17))
+type Binary int
+
+// A simple shortcut to format strings with Go quoting. Control and
+// non-printable characters will be escaped with their backslash equivalents in
+// output. Intended for untrusted or multiline strings which should be logged
+// as concisely as possible.
+type Quote string
+
+// ColorOption expresses how the output should be colored, if at all.
+type ColorOption uint8
+
+const (
+ // ColorOff is the default coloration, and does not
+ // inject color codes into the io.Writer.
+ ColorOff ColorOption = iota
+ // AutoColor checks if the io.Writer is a tty,
+ // and if so enables coloring.
+ AutoColor
+ // ForceColor will enable coloring, regardless of whether
+ // the io.Writer is a tty or not.
+ ForceColor
+)
+
+// SupportsColor is an optional interface that can be implemented by the output
+// value. If implemented and SupportsColor() returns true, then AutoColor will
+// enable colorization.
+type SupportsColor interface {
+ SupportsColor() bool
+}
+
+// LevelFromString returns a Level type for the named log level, or "NoLevel" if
+// the level string is invalid. This facilitates setting the log level via
+// config or environment variable by name in a predictable way.
+func LevelFromString(levelStr string) Level {
+ // We don't care about case. Accept both "INFO" and "info".
+ levelStr = strings.ToLower(strings.TrimSpace(levelStr))
+ switch levelStr {
+ case "trace":
+ return Trace
+ case "debug":
+ return Debug
+ case "info":
+ return Info
+ case "warn":
+ return Warn
+ case "error":
+ return Error
+ case "off":
+ return Off
+ default:
+ return NoLevel
+ }
+}
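+
+// Illustrative usage sketch (hypothetical; LOG_LEVEL is an environment
+// variable chosen by the caller):
+//
+//	opts := &LoggerOptions{Level: LevelFromString(os.Getenv("LOG_LEVEL"))}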
+
+func (l Level) String() string {
+ switch l {
+ case Trace:
+ return "trace"
+ case Debug:
+ return "debug"
+ case Info:
+ return "info"
+ case Warn:
+ return "warn"
+ case Error:
+ return "error"
+ case NoLevel:
+ return "none"
+ case Off:
+ return "off"
+ default:
+ return "unknown"
+ }
+}
+
+// Logger describes the interface that must be implemented by all loggers.
+type Logger interface {
+ // Args are alternating key, val pairs
+ // keys must be strings
+ // vals can be any type, but display is implementation specific
+ // Emit a message and key/value pairs at a provided log level
+ Log(level Level, msg string, args ...interface{})
+
+ // Emit a message and key/value pairs at the TRACE level
+ Trace(msg string, args ...interface{})
+
+ // Emit a message and key/value pairs at the DEBUG level
+ Debug(msg string, args ...interface{})
+
+ // Emit a message and key/value pairs at the INFO level
+ Info(msg string, args ...interface{})
+
+ // Emit a message and key/value pairs at the WARN level
+ Warn(msg string, args ...interface{})
+
+ // Emit a message and key/value pairs at the ERROR level
+ Error(msg string, args ...interface{})
+
+ // Indicate if TRACE logs would be emitted. This and the other Is* guards
+ // are used to elide expensive logging code based on the current level.
+ IsTrace() bool
+
+	// Indicate if DEBUG logs would be emitted.
+	IsDebug() bool
+
+	// Indicate if INFO logs would be emitted.
+	IsInfo() bool
+
+	// Indicate if WARN logs would be emitted.
+	IsWarn() bool
+
+	// Indicate if ERROR logs would be emitted.
+	IsError() bool
+
+	// ImpliedArgs returns the key/value pairs set via With
+ ImpliedArgs() []interface{}
+
+ // Creates a sublogger that will always have the given key/value pairs
+ With(args ...interface{}) Logger
+
+ // Returns the Name of the logger
+ Name() string
+
+ // Create a logger that will prepend the name string on the front of all messages.
+ // If the logger already has a name, the new value will be appended to the current
+	// name. That way, a major subsystem can use this to decorate all its own logs
+ // without losing context.
+ Named(name string) Logger
+
+ // Create a logger that will prepend the name string on the front of all messages.
+	// This sets the name of the logger to the value directly, unlike Named which honors
+ // the current name as well.
+ ResetNamed(name string) Logger
+
+ // Updates the level. This should affect all related loggers as well,
+ // unless they were created with IndependentLevels. If an
+ // implementation cannot update the level on the fly, it should no-op.
+ SetLevel(level Level)
+
+ // Returns the current level
+ GetLevel() Level
+
+ // Return a value that conforms to the stdlib log.Logger interface
+ StandardLogger(opts *StandardLoggerOptions) *log.Logger
+
+ // Return a value that conforms to io.Writer, which can be passed into log.SetOutput()
+ StandardWriter(opts *StandardLoggerOptions) io.Writer
+}
+
+// StandardLoggerOptions can be used to configure a new standard logger.
+type StandardLoggerOptions struct {
+	// Indicate that some minimal parsing should be done on strings to try to
+	// detect their level and re-emit them.
+	// This recognizes strings like [ERROR], [ERR], [TRACE], [WARN], [INFO], and
+	// [DEBUG], strips the level prefix, and reapplies it when re-emitting.
+ InferLevels bool
+
+	// Indicate that some minimal parsing should be done on strings to try to
+	// detect their level and re-emit them while ignoring possible timestamp
+	// values at the beginning of the string.
+	// This recognizes strings like [ERROR], [ERR], [TRACE], [WARN], [INFO], and
+	// [DEBUG], strips the level prefix, and reapplies it when re-emitting.
+ // The timestamp detection may result in false positives and incomplete
+ // string outputs.
+ // InferLevelsWithTimestamp is only relevant if InferLevels is true.
+ InferLevelsWithTimestamp bool
+
+ // ForceLevel is used to force all output from the standard logger to be at
+ // the specified level. Similar to InferLevels, this will strip any level
+ // prefix contained in the logged string before applying the forced level.
+	// If set, this overrides InferLevels.
+ ForceLevel Level
+}
+
+type TimeFunction = func() time.Time
+
+// LoggerOptions can be used to configure a new logger.
+type LoggerOptions struct {
+ // Name of the subsystem to prefix logs with
+ Name string
+
+ // The threshold for the logger. Anything less severe is suppressed
+ Level Level
+
+ // Where to write the logs to. Defaults to os.Stderr if nil
+ Output io.Writer
+
+ // An optional Locker in case Output is shared. This can be a sync.Mutex or
+ // a NoopLocker if the caller wants control over output, e.g. for batching
+ // log lines.
+ Mutex Locker
+
+ // Control if the output should be in JSON.
+ JSONFormat bool
+
+ // Include file and line information in each log line
+ IncludeLocation bool
+
+ // AdditionalLocationOffset is the number of additional stack levels to skip
+ // when finding the file and line information for the log line
+ AdditionalLocationOffset int
+
+ // The time format to use instead of the default
+ TimeFormat string
+
+ // A function which is called to get the time object that is formatted using `TimeFormat`
+ TimeFn TimeFunction
+
+ // Control whether or not to display the time at all. This is required
+ // because setting TimeFormat to empty assumes the default format.
+ DisableTime bool
+
+ // Color the output. On Windows, colored logs are only available for io.Writers that
+ // are concretely instances of *os.File.
+ Color ColorOption
+
+ // Only color the header, not the body. This can help with readability of long messages.
+ ColorHeaderOnly bool
+
+ // Color the header and message body fields. This can help with readability
+ // of long messages with multiple fields.
+ ColorHeaderAndFields bool
+
+ // A function which is called with the log information and if it returns true the value
+ // should not be logged.
+ // This is useful when interacting with a system that you wish to suppress the log
+ // message for (because it's too noisy, etc)
+ Exclude func(level Level, msg string, args ...interface{}) bool
+
+ // IndependentLevels causes subloggers to be created with an independent
+ // copy of this logger's level. This means that using SetLevel on this
+ // logger will not affect any subloggers, and SetLevel on any subloggers
+ // will not affect the parent or sibling loggers.
+ IndependentLevels bool
+
+	// When set, changing the level of a logger affects only its direct sub-loggers
+ // rather than all sub-loggers. For example:
+ // a := logger.Named("a")
+ // a.SetLevel(Error)
+ // b := a.Named("b")
+ // c := a.Named("c")
+ // b.GetLevel() => Error
+ // c.GetLevel() => Error
+ // b.SetLevel(Info)
+ // a.GetLevel() => Error
+ // b.GetLevel() => Info
+ // c.GetLevel() => Error
+ // a.SetLevel(Warn)
+ // a.GetLevel() => Warn
+ // b.GetLevel() => Warn
+ // c.GetLevel() => Warn
+ SyncParentLevel bool
+
+ // SubloggerHook registers a function that is called when a sublogger via
+ // Named, With, or ResetNamed is created. If defined, the function is passed
+ // the newly created Logger and the returned Logger is returned from the
+ // original function. This option allows customization via interception and
+ // wrapping of Logger instances.
+ SubloggerHook func(sub Logger) Logger
+}
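+
+// Illustrative usage sketch (hypothetical): SubloggerHook can stamp every
+// derived logger with an extra key/value pair:
+//
+//	opts := &LoggerOptions{
+//		SubloggerHook: func(sub Logger) Logger {
+//			return sub.With("scope", "plugin-host")
+//		},
+//	}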
+
+// InterceptLogger describes the interface for using a logger
+// that can register different output sinks.
+// This is useful for sending lower level log messages
+// to a different output while keeping the root logger
+// at a higher one.
+type InterceptLogger interface {
+ // Logger is the root logger for an InterceptLogger
+ Logger
+
+ // RegisterSink adds a SinkAdapter to the InterceptLogger
+ RegisterSink(sink SinkAdapter)
+
+ // DeregisterSink removes a SinkAdapter from the InterceptLogger
+ DeregisterSink(sink SinkAdapter)
+
+	// Create an InterceptLogger that will prepend the name string on the front of all messages.
+	// If the logger already has a name, the new value will be appended to the current
+	// name. That way, a major subsystem can use this to decorate all its own logs
+	// without losing context.
+	NamedIntercept(name string) InterceptLogger
+
+	// Create an InterceptLogger that will prepend the name string on the front of all messages.
+	// This sets the name of the logger to the value directly, unlike Named which honors
+	// the current name as well.
+ ResetNamedIntercept(name string) InterceptLogger
+
+ // Deprecated: use StandardLogger
+ StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger
+
+ // Deprecated: use StandardWriter
+ StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer
+}
+
+// SinkAdapter describes the interface that must be implemented
+// in order to Register a new sink to an InterceptLogger
+type SinkAdapter interface {
+ Accept(name string, level Level, msg string, args ...interface{})
+}
+
+// Flushable represents a method for flushing an output buffer. It can be used
+// when resetting the logger to use a new output, in order to flush the writes to
+// the existing output beforehand.
+type Flushable interface {
+ Flush() error
+}
+
+// OutputResettable provides ways to swap the output in use at runtime
+type OutputResettable interface {
+ // ResetOutput swaps the current output writer with the one given in the
+ // opts. Color options given in opts will be used for the new output.
+ ResetOutput(opts *LoggerOptions) error
+
+ // ResetOutputWithFlush swaps the current output writer with the one given
+ // in the opts, first calling Flush on the given Flushable. Color options
+ // given in opts will be used for the new output.
+ ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error
+}
+
+// Locker is used for locking output. If not set when creating a logger, a
+// sync.Mutex will be used internally.
+type Locker interface {
+ // Lock is called when the output is going to be changed or written to
+ Lock()
+
+ // Unlock is called when the operation that called Lock() completes
+ Unlock()
+}
+
+// NoopLocker implements locker but does nothing. This is useful if the client
+// wants tight control over locking, in order to provide grouping of log
+// entries or other functionality.
+type NoopLocker struct{}
+
+// Lock does nothing
+func (n NoopLocker) Lock() {}
+
+// Unlock does nothing
+func (n NoopLocker) Unlock() {}
+
+var _ Locker = (*NoopLocker)(nil)
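+
+// Illustrative usage sketch (hypothetical; w is a shared io.Writer): two
+// loggers writing to the same output can share a Locker to serialize writes:
+//
+//	var mu sync.Mutex
+//	a := New(&LoggerOptions{Name: "a", Output: w, Mutex: &mu})
+//	b := New(&LoggerOptions{Name: "b", Output: w, Mutex: &mu})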
diff --git a/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/vendor/github.com/hashicorp/go-hclog/nulllogger.go
new file mode 100644
index 00000000..d43da809
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/nulllogger.go
@@ -0,0 +1,63 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MIT
+
+package hclog
+
+import (
+ "io"
+ "io/ioutil"
+ "log"
+)
+
+// NewNullLogger instantiates a Logger for which all calls
+// will succeed without doing anything.
+// Useful for testing purposes.
+func NewNullLogger() Logger {
+ return &nullLogger{}
+}
+
+type nullLogger struct{}
+
+func (l *nullLogger) Log(level Level, msg string, args ...interface{}) {}
+
+func (l *nullLogger) Trace(msg string, args ...interface{}) {}
+
+func (l *nullLogger) Debug(msg string, args ...interface{}) {}
+
+func (l *nullLogger) Info(msg string, args ...interface{}) {}
+
+func (l *nullLogger) Warn(msg string, args ...interface{}) {}
+
+func (l *nullLogger) Error(msg string, args ...interface{}) {}
+
+func (l *nullLogger) IsTrace() bool { return false }
+
+func (l *nullLogger) IsDebug() bool { return false }
+
+func (l *nullLogger) IsInfo() bool { return false }
+
+func (l *nullLogger) IsWarn() bool { return false }
+
+func (l *nullLogger) IsError() bool { return false }
+
+func (l *nullLogger) ImpliedArgs() []interface{} { return []interface{}{} }
+
+func (l *nullLogger) With(args ...interface{}) Logger { return l }
+
+func (l *nullLogger) Name() string { return "" }
+
+func (l *nullLogger) Named(name string) Logger { return l }
+
+func (l *nullLogger) ResetNamed(name string) Logger { return l }
+
+func (l *nullLogger) SetLevel(level Level) {}
+
+func (l *nullLogger) GetLevel() Level { return NoLevel }
+
+func (l *nullLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
+ return log.New(l.StandardWriter(opts), "", log.LstdFlags)
+}
+
+func (l *nullLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
+ return ioutil.Discard
+}
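NewNullLogger is intended for tests where the code under test takes a Logger dependency but its output is irrelevant. A short, hypothetical usage sketch (`doWork` stands in for real code):

```go
package example

import (
	"testing"

	hclog "github.com/hashicorp/go-hclog"
)

// doWork is a stand-in for code that requires a Logger.
func doWork(log hclog.Logger) error {
	log.Debug("starting work") // silently discarded by the null logger
	return nil
}

func TestDoWork(t *testing.T) {
	if err := doWork(hclog.NewNullLogger()); err != nil {
		t.Fatal(err)
	}
}
```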
diff --git a/vendor/github.com/hashicorp/go-hclog/stacktrace.go b/vendor/github.com/hashicorp/go-hclog/stacktrace.go
new file mode 100644
index 00000000..9b27bd3d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/stacktrace.go
@@ -0,0 +1,109 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package hclog
+
+import (
+ "bytes"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+var (
+ _stacktraceIgnorePrefixes = []string{
+ "runtime.goexit",
+ "runtime.main",
+ }
+ _stacktracePool = sync.Pool{
+ New: func() interface{} {
+ return newProgramCounters(64)
+ },
+ }
+)
+
+// CapturedStacktrace represents a stacktrace captured by a previous call
+// to log.Stacktrace. If passed to a logging function, the stacktrace
+// will be appended.
+type CapturedStacktrace string
+
+// Stacktrace captures a stacktrace of the current goroutine and returns
+// it to be passed to a logging function.
+func Stacktrace() CapturedStacktrace {
+ return CapturedStacktrace(takeStacktrace())
+}
+
+func takeStacktrace() string {
+ programCounters := _stacktracePool.Get().(*programCounters)
+ defer _stacktracePool.Put(programCounters)
+
+ var buffer bytes.Buffer
+
+ for {
+ // Skip the call to runtime.Counters and takeStacktrace so that the
+ // program counters start at the caller of takeStacktrace.
+ n := runtime.Callers(2, programCounters.pcs)
+ if n < cap(programCounters.pcs) {
+ programCounters.pcs = programCounters.pcs[:n]
+ break
+ }
+ // Don't put the too-short counter slice back into the pool; this lets
+ // the pool adjust if we consistently take deep stacktraces.
+ programCounters = newProgramCounters(len(programCounters.pcs) * 2)
+ }
+
+ i := 0
+ frames := runtime.CallersFrames(programCounters.pcs)
+ for frame, more := frames.Next(); more; frame, more = frames.Next() {
+ if shouldIgnoreStacktraceFunction(frame.Function) {
+ continue
+ }
+ if i != 0 {
+ buffer.WriteByte('\n')
+ }
+ i++
+ buffer.WriteString(frame.Function)
+ buffer.WriteByte('\n')
+ buffer.WriteByte('\t')
+ buffer.WriteString(frame.File)
+ buffer.WriteByte(':')
+ buffer.WriteString(strconv.Itoa(int(frame.Line)))
+ }
+
+ return buffer.String()
+}
+
+func shouldIgnoreStacktraceFunction(function string) bool {
+ for _, prefix := range _stacktraceIgnorePrefixes {
+ if strings.HasPrefix(function, prefix) {
+ return true
+ }
+ }
+ return false
+}
+
+type programCounters struct {
+ pcs []uintptr
+}
+
+func newProgramCounters(size int) *programCounters {
+ return &programCounters{make([]uintptr, size)}
+}
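CapturedStacktrace is meant to be passed as an ordinary argument value to one of the logging methods; the formatter then appends the trace after the log line. A brief illustrative snippet (the logger name is arbitrary):

```go
package main

import hclog "github.com/hashicorp/go-hclog"

func main() {
	log := hclog.New(&hclog.LoggerOptions{Name: "app"})

	// The captured trace travels as a value; it is rendered after the message.
	log.Error("unexpected state", "stacktrace", hclog.Stacktrace())
}
```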
diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go
new file mode 100644
index 00000000..03739b61
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/stdlog.go
@@ -0,0 +1,113 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MIT
+
+package hclog
+
+import (
+ "bytes"
+ "log"
+ "regexp"
+ "strings"
+)
+
+// Regex to ignore characters commonly found in timestamp formats from the
+// beginning of inputs.
+var logTimestampRegexp = regexp.MustCompile(`^[\d\s\:\/\.\+-TZ]*`)
+
+// Provides an io.Writer to shim the data out of *log.Logger
+// and back into our Logger. This is basically the only way to
+// build upon *log.Logger.
+type stdlogAdapter struct {
+ log Logger
+ inferLevels bool
+ inferLevelsWithTimestamp bool
+ forceLevel Level
+}
+
+// Take the data, infer the levels if configured, and send it through
+// a regular Logger.
+func (s *stdlogAdapter) Write(data []byte) (int, error) {
+ str := string(bytes.TrimRight(data, " \t\n"))
+
+ if s.forceLevel != NoLevel {
+ // Use pickLevel to strip log levels included in the line since we are
+ // forcing the level
+ _, str := s.pickLevel(str)
+
+ // Log at the forced level
+ s.dispatch(str, s.forceLevel)
+ } else if s.inferLevels {
+ if s.inferLevelsWithTimestamp {
+ str = s.trimTimestamp(str)
+ }
+
+ level, str := s.pickLevel(str)
+ s.dispatch(str, level)
+ } else {
+ s.log.Info(str)
+ }
+
+ return len(data), nil
+}
+
+func (s *stdlogAdapter) dispatch(str string, level Level) {
+ switch level {
+ case Trace:
+ s.log.Trace(str)
+ case Debug:
+ s.log.Debug(str)
+ case Info:
+ s.log.Info(str)
+ case Warn:
+ s.log.Warn(str)
+ case Error:
+ s.log.Error(str)
+ default:
+ s.log.Info(str)
+ }
+}
+
+// Detect, based on conventions, what log level this is.
+func (s *stdlogAdapter) pickLevel(str string) (Level, string) {
+ switch {
+ case strings.HasPrefix(str, "[DEBUG]"):
+ return Debug, strings.TrimSpace(str[7:])
+ case strings.HasPrefix(str, "[TRACE]"):
+ return Trace, strings.TrimSpace(str[7:])
+ case strings.HasPrefix(str, "[INFO]"):
+ return Info, strings.TrimSpace(str[6:])
+ case strings.HasPrefix(str, "[WARN]"):
+ return Warn, strings.TrimSpace(str[6:])
+ case strings.HasPrefix(str, "[ERROR]"):
+ return Error, strings.TrimSpace(str[7:])
+ case strings.HasPrefix(str, "[ERR]"):
+ return Error, strings.TrimSpace(str[5:])
+ default:
+ return Info, str
+ }
+}
+
+func (s *stdlogAdapter) trimTimestamp(str string) string {
+ idx := logTimestampRegexp.FindStringIndex(str)
+ return str[idx[1]:]
+}
+
+type logWriter struct {
+ l *log.Logger
+}
+
+func (l *logWriter) Write(b []byte) (int, error) {
+ l.l.Println(string(bytes.TrimRight(b, " \n\t")))
+ return len(b), nil
+}
+
+// Takes a standard library logger and returns a Logger that will write to it
+func FromStandardLogger(l *log.Logger, opts *LoggerOptions) Logger {
+ var dl LoggerOptions = *opts
+
+ // Use the time format that log.Logger uses
+ dl.DisableTime = true
+ dl.Output = &logWriter{l}
+
+ return New(&dl)
+}
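FromStandardLogger is the inverse of StandardLogger/StandardWriter: hclog formats each record and the wrapped *log.Logger writes it out, with hclog's own timestamp disabled so it is not duplicated. A hedged usage sketch:

```go
package main

import (
	stdlog "log"
	"os"

	hclog "github.com/hashicorp/go-hclog"
)

func main() {
	std := stdlog.New(os.Stderr, "", stdlog.LstdFlags)

	logger := hclog.FromStandardLogger(std, &hclog.LoggerOptions{
		Name:  "legacy",
		Level: hclog.Debug,
	})

	logger.Info("connected", "component", "db")
}
```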
diff --git a/vendor/github.com/hashicorp/go-hclog/writer.go b/vendor/github.com/hashicorp/go-hclog/writer.go
new file mode 100644
index 00000000..4ee219bf
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/writer.go
@@ -0,0 +1,85 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MIT
+
+package hclog
+
+import (
+ "bytes"
+ "io"
+)
+
+type writer struct {
+ b bytes.Buffer
+ w io.Writer
+ color ColorOption
+}
+
+func newWriter(w io.Writer, color ColorOption) *writer {
+ return &writer{w: w, color: color}
+}
+
+func (w *writer) Flush(level Level) (err error) {
+ var unwritten = w.b.Bytes()
+
+ if w.color != ColorOff {
+ color := _levelToColor[level]
+ unwritten = []byte(color.Sprintf("%s", unwritten))
+ }
+
+ if lw, ok := w.w.(LevelWriter); ok {
+ _, err = lw.LevelWrite(level, unwritten)
+ } else {
+ _, err = w.w.Write(unwritten)
+ }
+ w.b.Reset()
+ return err
+}
+
+func (w *writer) Write(p []byte) (int, error) {
+ return w.b.Write(p)
+}
+
+func (w *writer) WriteByte(c byte) error {
+ return w.b.WriteByte(c)
+}
+
+func (w *writer) WriteString(s string) (int, error) {
+ return w.b.WriteString(s)
+}
+
+// LevelWriter is the interface that wraps the LevelWrite method.
+type LevelWriter interface {
+ LevelWrite(level Level, p []byte) (n int, err error)
+}
+
+// LeveledWriter writes all log messages to the standard writer,
+// except for log levels that are defined in the overrides map.
+type LeveledWriter struct {
+ standard io.Writer
+ overrides map[Level]io.Writer
+}
+
+// NewLeveledWriter returns an initialized LeveledWriter.
+//
+// standard will be used as the default writer for all log levels,
+// except for log levels that are defined in the overrides map.
+func NewLeveledWriter(standard io.Writer, overrides map[Level]io.Writer) *LeveledWriter {
+ return &LeveledWriter{
+ standard: standard,
+ overrides: overrides,
+ }
+}
+
+// Write implements io.Writer.
+func (lw *LeveledWriter) Write(p []byte) (int, error) {
+ return lw.standard.Write(p)
+}
+
+// LevelWrite implements LevelWriter.
+func (lw *LeveledWriter) LevelWrite(level Level, p []byte) (int, error) {
+ w, ok := lw.overrides[level]
+ if !ok {
+ w = lw.standard
+ }
+ return w.Write(p)
+}
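Because the internal writer checks for the LevelWriter interface at flush time, a LeveledWriter can be handed directly to LoggerOptions.Output to route individual levels to different destinations. An illustrative sketch:

```go
package main

import (
	"io"
	"os"

	hclog "github.com/hashicorp/go-hclog"
)

func main() {
	// Errors go to stderr; every other level falls back to stdout.
	out := hclog.NewLeveledWriter(os.Stdout, map[hclog.Level]io.Writer{
		hclog.Error: os.Stderr,
	})

	log := hclog.New(&hclog.LoggerOptions{Name: "app", Output: out})
	log.Info("routine message")  // written to stdout
	log.Error("something broke") // written to stderr
}
```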
diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE
new file mode 100644
index 00000000..82b4de97
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/LICENSE
@@ -0,0 +1,353 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+  Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+  means an individual or a legal entity exercising rights under this
+  License. For legal entities, “You” includes any entity that controls, is
+  controlled by, or is under common control with You. For purposes of this
+  definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+  b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+  restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+     the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+  Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+  party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+  prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+  This Source Code Form is “Incompatible
+  With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/go-multierror/Makefile b/vendor/github.com/hashicorp/go-multierror/Makefile
new file mode 100644
index 00000000..b97cd6ed
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/Makefile
@@ -0,0 +1,31 @@
+TEST?=./...
+
+default: test
+
+# test runs the test suite and vets the code.
+test: generate
+ @echo "==> Running tests..."
+ @go list $(TEST) \
+ | grep -v "/vendor/" \
+ | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS}
+
+# testrace runs the race checker
+testrace: generate
+ @echo "==> Running tests (race)..."
+ @go list $(TEST) \
+ | grep -v "/vendor/" \
+ | xargs -n1 go test -timeout=60s -race ${TESTARGS}
+
+# updatedeps installs all the dependencies needed to run and build.
+updatedeps:
+ @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'"
+
+# generate runs `go generate` to build the dynamically generated source files.
+generate:
+ @echo "==> Generating..."
+ @find . -type f -name '.DS_Store' -delete
+ @go list ./... \
+ | grep -v "/vendor/" \
+ | xargs -n1 go generate
+
+.PHONY: default test testrace updatedeps generate
diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md
new file mode 100644
index 00000000..71dd308e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/README.md
@@ -0,0 +1,150 @@
+# go-multierror
+
+[![CircleCI](https://img.shields.io/circleci/build/github/hashicorp/go-multierror/master)](https://circleci.com/gh/hashicorp/go-multierror)
+[![Go Reference](https://pkg.go.dev/badge/github.com/hashicorp/go-multierror.svg)](https://pkg.go.dev/github.com/hashicorp/go-multierror)
+![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/hashicorp/go-multierror)
+
+[circleci]: https://app.circleci.com/pipelines/github/hashicorp/go-multierror
+[godocs]: https://pkg.go.dev/github.com/hashicorp/go-multierror
+
+`go-multierror` is a package for Go that provides a mechanism for
+representing a list of `error` values as a single `error`.
+
+This allows a function in Go to return an `error` that might actually
+be a list of errors. If the caller knows this, they can unwrap the
+list and access the errors. If the caller doesn't know, the error
+formats to a nice human-readable format.
+
+`go-multierror` is fully compatible with the Go standard library
+[errors](https://golang.org/pkg/errors/) package, including the
+functions `As`, `Is`, and `Unwrap`. This provides a standardized approach
+for introspecting on error values.
+
+## Installation and Docs
+
+Install using `go get github.com/hashicorp/go-multierror`.
+
+Full documentation is available at
+https://pkg.go.dev/github.com/hashicorp/go-multierror
+
+### Requires go version 1.13 or newer
+
+`go-multierror` requires go version 1.13 or newer. Go 1.13 introduced
+[error wrapping](https://golang.org/doc/go1.13#error_wrapping), which
+this library takes advantage of.
+
+If you need to use an earlier version of go, you can use the
+[v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0)
+tag, which doesn't rely on features in go 1.13.
+
+If you see compile errors that look like the below, it's likely that
+you're on an older version of go:
+
+```
+/go/src/github.com/hashicorp/go-multierror/multierror.go:112:9: undefined: errors.As
+/go/src/github.com/hashicorp/go-multierror/multierror.go:117:9: undefined: errors.Is
+```
+
+## Usage
+
+go-multierror is easy to use and purposely built to be unobtrusive in
+existing Go applications/libraries that may not be aware of it.
+
+**Building a list of errors**
+
+The `Append` function is used to create a list of errors. This function
+behaves a lot like the Go built-in `append` function: it doesn't matter
+if the first argument is nil, a `multierror.Error`, or any other `error`,
+the function behaves as you would expect.
+
+```go
+var result error
+
+if err := step1(); err != nil {
+ result = multierror.Append(result, err)
+}
+if err := step2(); err != nil {
+ result = multierror.Append(result, err)
+}
+
+return result
+```
+
+**Customizing the formatting of the errors**
+
+By specifying a custom `ErrorFormat`, you can customize the format
+of the `Error() string` function:
+
+```go
+var result *multierror.Error
+
+// ... accumulate errors here, maybe using Append
+
+if result != nil {
+ result.ErrorFormat = func([]error) string {
+ return "errors!"
+ }
+}
+```
+
+**Accessing the list of errors**
+
+`multierror.Error` implements `error` so if the caller doesn't know about
+multierror, it will work just fine. But if you're aware a multierror might
+be returned, you can use type switches to access the list of errors:
+
+```go
+if err := something(); err != nil {
+ if merr, ok := err.(*multierror.Error); ok {
+ // Use merr.Errors
+ }
+}
+```
+
+You can also use the standard [`errors.Unwrap`](https://golang.org/pkg/errors/#Unwrap)
+function. This will continue to unwrap into subsequent errors until none exist.
+
+**Extracting an error**
+
+The standard library [`errors.As`](https://golang.org/pkg/errors/#As)
+function can be used directly with a multierror to extract a specific error:
+
+```go
+// Assume err is a multierror value
+err := somefunc()
+
+// We want to know if "err" has a "RichErrorType" in it and extract it.
+var errRich RichErrorType
+if errors.As(err, &errRich) {
+ // It has it, and now errRich is populated.
+}
+```
+
+**Checking for an exact error value**
+
+Some errors are returned as exact errors such as the [`ErrNotExist`](https://golang.org/pkg/os/#pkg-variables)
+error in the `os` package. You can check if this error is present by using
+the standard [`errors.Is`](https://golang.org/pkg/errors/#Is) function.
+
+```go
+// Assume err is a multierror value
+err := somefunc()
+if errors.Is(err, os.ErrNotExist) {
+ // err contains os.ErrNotExist
+}
+```
+
+**Returning a multierror only if there are errors**
+
+If you build a `multierror.Error`, you can use the `ErrorOrNil` function
+to return an `error` implementation only if there are errors to return:
+
+```go
+var result *multierror.Error
+
+// ... accumulate errors here
+
+// Return the `error` only if errors were added to the multierror, otherwise
+// return nil since there are no errors.
+return result.ErrorOrNil()
+```
diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go
new file mode 100644
index 00000000..3e2589bf
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/append.go
@@ -0,0 +1,43 @@
+package multierror
+
+// Append is a helper function that will append more errors
+// onto an Error in order to create a larger multi-error.
+//
+// If err is not a multierror.Error, then it will be turned into
+// one. If any of the errs are multierror.Error, they will be flattened
+// one level into err.
+// Any nil errors within errs will be ignored. If err is nil, a new
+// *Error will be returned.
+func Append(err error, errs ...error) *Error {
+ switch err := err.(type) {
+ case *Error:
+ // Typed nils can reach here, so initialize if we are nil
+ if err == nil {
+ err = new(Error)
+ }
+
+ // Go through each error and flatten
+ for _, e := range errs {
+ switch e := e.(type) {
+ case *Error:
+ if e != nil {
+ err.Errors = append(err.Errors, e.Errors...)
+ }
+ default:
+ if e != nil {
+ err.Errors = append(err.Errors, e)
+ }
+ }
+ }
+
+ return err
+ default:
+ newErrs := make([]error, 0, len(errs)+1)
+ if err != nil {
+ newErrs = append(newErrs, err)
+ }
+ newErrs = append(newErrs, errs...)
+
+ return Append(&Error{}, newErrs...)
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go
new file mode 100644
index 00000000..aab8e9ab
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/flatten.go
@@ -0,0 +1,26 @@
+package multierror
+
+// Flatten flattens the given error, merging any *Errors together into
+// a single *Error.
+func Flatten(err error) error {
+ // If it isn't an *Error, just return the error as-is
+ if _, ok := err.(*Error); !ok {
+ return err
+ }
+
+ // Otherwise, make the result and flatten away!
+ flatErr := new(Error)
+ flatten(err, flatErr)
+ return flatErr
+}
+
+func flatten(err error, flatErr *Error) {
+ switch err := err.(type) {
+ case *Error:
+ for _, e := range err.Errors {
+ flatten(e, flatErr)
+ }
+ default:
+ flatErr.Errors = append(flatErr.Errors, err)
+ }
+}
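Flatten only matters when nested *Error values are built by hand, since Append already flattens one level. A small illustrative example:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func main() {
	// Hand-built nesting that Append would have flattened automatically.
	inner := &multierror.Error{Errors: []error{errors.New("a"), errors.New("b")}}
	outer := &multierror.Error{Errors: []error{errors.New("c"), inner}}

	flat := multierror.Flatten(outer).(*multierror.Error)
	fmt.Println(len(flat.Errors)) // 3: "c", "a", "b"
}
```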
diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go
new file mode 100644
index 00000000..47f13c49
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/format.go
@@ -0,0 +1,27 @@
+package multierror
+
+import (
+ "fmt"
+ "strings"
+)
+
+// ErrorFormatFunc is a function callback that is called by Error to
+// turn the list of errors into a string.
+type ErrorFormatFunc func([]error) string
+
+// ListFormatFunc is a basic formatter that outputs the number of errors
+// that occurred along with a bullet point list of the errors.
+func ListFormatFunc(es []error) string {
+ if len(es) == 1 {
+ return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0])
+ }
+
+ points := make([]string, len(es))
+ for i, err := range es {
+ points[i] = fmt.Sprintf("* %s", err)
+ }
+
+ return fmt.Sprintf(
+ "%d errors occurred:\n\t%s\n\n",
+ len(es), strings.Join(points, "\n\t"))
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/group.go b/vendor/github.com/hashicorp/go-multierror/group.go
new file mode 100644
index 00000000..9c29efb7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/group.go
@@ -0,0 +1,38 @@
+package multierror
+
+import "sync"
+
+// Group is a collection of goroutines which return errors that need to be
+// coalesced.
+type Group struct {
+ mutex sync.Mutex
+ err *Error
+ wg sync.WaitGroup
+}
+
+// Go calls the given function in a new goroutine.
+//
+// If the function returns an error it is added to the group multierror which
+// is returned by Wait.
+func (g *Group) Go(f func() error) {
+ g.wg.Add(1)
+
+ go func() {
+ defer g.wg.Done()
+
+ if err := f(); err != nil {
+ g.mutex.Lock()
+ g.err = Append(g.err, err)
+ g.mutex.Unlock()
+ }
+ }()
+}
+
+// Wait blocks until all function calls from the Go method have returned, then
+// returns the multierror.
+func (g *Group) Wait() *Error {
+ g.wg.Wait()
+ g.mutex.Lock()
+ defer g.mutex.Unlock()
+ return g.err
+}
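Group is similar in spirit to an errgroup, except that every failure is collected rather than only the first. A hypothetical usage sketch:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func main() {
	var g multierror.Group

	for i := 0; i < 3; i++ {
		i := i // capture the loop variable for the goroutine
		g.Go(func() error {
			if i == 1 {
				return fmt.Errorf("worker %d failed", i)
			}
			return nil
		})
	}

	// Wait returns a *multierror.Error; ErrorOrNil maps "no errors" to nil.
	if err := g.Wait().ErrorOrNil(); err != nil {
		fmt.Println(err)
	}
}
```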
diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go
new file mode 100644
index 00000000..f5457432
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/multierror.go
@@ -0,0 +1,121 @@
+package multierror
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Error is an error type to track multiple errors. This is used to
+// accumulate errors and return them as a single "error" value.
+type Error struct {
+ Errors []error
+ ErrorFormat ErrorFormatFunc
+}
+
+func (e *Error) Error() string {
+ fn := e.ErrorFormat
+ if fn == nil {
+ fn = ListFormatFunc
+ }
+
+ return fn(e.Errors)
+}
+
+// ErrorOrNil returns an error interface if this Error represents
+// a list of errors, or returns nil if the list of errors is empty. This
+// function is useful at the end of accumulation to make sure that the value
+// returned represents the existence of errors.
+func (e *Error) ErrorOrNil() error {
+ if e == nil {
+ return nil
+ }
+ if len(e.Errors) == 0 {
+ return nil
+ }
+
+ return e
+}
+
+func (e *Error) GoString() string {
+ return fmt.Sprintf("*%#v", *e)
+}
+
+// WrappedErrors returns the list of errors that this Error is wrapping. It is
+// an implementation of the errwrap.Wrapper interface so that multierror.Error
+// can be used with that library.
+//
+// This method is not safe to be called concurrently. Unlike accessing the
+// Errors field directly, this function also checks if the multierror is nil to
+// prevent a null-pointer panic. It satisfies the errwrap.Wrapper interface.
+func (e *Error) WrappedErrors() []error {
+ if e == nil {
+ return nil
+ }
+ return e.Errors
+}
+
+// Unwrap returns an error from Error (or nil if there are no errors).
+// This error returned will further support Unwrap to get the next error,
+// etc. The order will match the order of Errors in the multierror.Error
+// at the time of calling.
+//
+// The resulting error supports errors.As/Is/Unwrap so you can continue
+// to use the stdlib errors package to introspect further.
+//
+// This will perform a shallow copy of the errors slice. Any errors appended
+// to this error after calling Unwrap will not be available until a new
+// Unwrap is called on the multierror.Error.
+func (e *Error) Unwrap() error {
+ // If we have no errors then we do nothing
+ if e == nil || len(e.Errors) == 0 {
+ return nil
+ }
+
+ // If we have exactly one error, we can just return that directly.
+ if len(e.Errors) == 1 {
+ return e.Errors[0]
+ }
+
+ // Shallow copy the slice
+ errs := make([]error, len(e.Errors))
+ copy(errs, e.Errors)
+ return chain(errs)
+}
+
+// chain implements the interfaces necessary for errors.Is/As/Unwrap to
+// work in a deterministic way with multierror. A chain tracks a list of
+// errors while accounting for the current represented error. This lets
+// Is/As be meaningful.
+//
+// Unwrap returns the next error. In the cleanest form, Unwrap would return
+// the wrapped error here but we can't do that if we want to properly
+// get access to all the errors. Instead, users are recommended to use
+// Is/As to get the correct error type out.
+//
+// Precondition: []error is non-empty (len > 0)
+type chain []error
+
+// Error implements the error interface
+func (e chain) Error() string {
+ return e[0].Error()
+}
+
+// Unwrap implements errors.Unwrap by returning the next error in the
+// chain or nil if there are no more errors.
+func (e chain) Unwrap() error {
+ if len(e) == 1 {
+ return nil
+ }
+
+ return e[1:]
+}
+
+// As implements errors.As by attempting to map to the current value.
+func (e chain) As(target interface{}) bool {
+ return errors.As(e[0], target)
+}
+
+// Is implements errors.Is by comparing the current value directly.
+func (e chain) Is(target error) bool {
+ return errors.Is(e[0], target)
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go
new file mode 100644
index 00000000..5c477abe
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/prefix.go
@@ -0,0 +1,37 @@
+package multierror
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/errwrap"
+)
+
+// Prefix is a helper function that will prefix some text
+// to the given error. If the error is a multierror.Error, then
+// it will be prefixed to each wrapped error.
+//
+// This is useful to use when appending multiple multierrors
+// together in order to give better scoping.
+func Prefix(err error, prefix string) error {
+ if err == nil {
+ return nil
+ }
+
+ format := fmt.Sprintf("%s {{err}}", prefix)
+ switch err := err.(type) {
+ case *Error:
+ // Typed nils can reach here, so initialize if we are nil
+ if err == nil {
+ err = new(Error)
+ }
+
+ // Wrap each of the errors
+ for i, e := range err.Errors {
+ err.Errors[i] = errwrap.Wrapf(format, e)
+ }
+
+ return err
+ default:
+ return errwrap.Wrapf(format, err)
+ }
+}
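Prefix scopes every wrapped error with the given text, which is useful before merging multierrors from several subsystems. An illustrative sketch (the "redis:" prefix is arbitrary):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func main() {
	var result error
	result = multierror.Append(result, errors.New("connection refused"))
	result = multierror.Append(result, errors.New("timeout"))

	// Each wrapped error is now reported as "redis: <original error>".
	result = multierror.Prefix(result, "redis:")
	fmt.Println(result)
}
```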
diff --git a/vendor/github.com/hashicorp/go-multierror/sort.go b/vendor/github.com/hashicorp/go-multierror/sort.go
new file mode 100644
index 00000000..fecb14e8
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/sort.go
@@ -0,0 +1,16 @@
+package multierror
+
+// Len implements sort.Interface function for length
+func (err Error) Len() int {
+ return len(err.Errors)
+}
+
+// Swap implements sort.Interface function for swapping elements
+func (err Error) Swap(i, j int) {
+ err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i]
+}
+
+// Less implements sort.Interface function for determining order
+func (err Error) Less(i, j int) bool {
+ return err.Errors[i].Error() < err.Errors[j].Error()
+}
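Since Error satisfies sort.Interface on its value receiver, the collected errors can be ordered in place with the standard sort package. A short illustrative example:

```go
package main

import (
	"errors"
	"fmt"
	"sort"

	"github.com/hashicorp/go-multierror"
)

func main() {
	merr := &multierror.Error{Errors: []error{
		errors.New("zebra"),
		errors.New("apple"),
	}}

	// Ordering compares the errors' string representations.
	sort.Sort(*merr)
	fmt.Println(merr) // "apple" is now listed before "zebra"
}
```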
diff --git a/vendor/github.com/hashicorp/go-plugin/.gitignore b/vendor/github.com/hashicorp/go-plugin/.gitignore
new file mode 100644
index 00000000..4befed30
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/.gitignore
@@ -0,0 +1,2 @@
+.DS_Store
+.idea
diff --git a/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md b/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md
new file mode 100644
index 00000000..3d0379c5
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md
@@ -0,0 +1,102 @@
+## v1.6.0
+
+CHANGES:
+
+* plugin: Plugins written in other languages can optionally start to advertise whether they support gRPC broker multiplexing.
+ If the environment variable `PLUGIN_MULTIPLEX_GRPC` is set, it is safe to include a seventh field containing a boolean
+ value in the `|`-separated protocol negotiation line.
+
+ENHANCEMENTS:
+
+* Support muxing gRPC broker connections over a single listener [[GH-288](https://github.com/hashicorp/go-plugin/pull/288)]
+* client: Configurable buffer size for reading plugin log lines [[GH-265](https://github.com/hashicorp/go-plugin/pull/265)]
+* Use `buf` for proto generation [[GH-286](https://github.com/hashicorp/go-plugin/pull/286)]
+* deps: bump golang.org/x/net to v0.17.0 [[GH-285](https://github.com/hashicorp/go-plugin/pull/285)]
+* deps: bump golang.org/x/sys to v0.13.0 [[GH-285](https://github.com/hashicorp/go-plugin/pull/285)]
+* deps: bump golang.org/x/text to v0.13.0 [[GH-285](https://github.com/hashicorp/go-plugin/pull/285)]
+
+## v1.5.2
+
+ENHANCEMENTS:
+
+* client: New `UnixSocketConfig.TempDir` option allows setting the directory to use when creating plugin-specific Unix socket directories [[GH-282](https://github.com/hashicorp/go-plugin/pull/282)]
+
+## v1.5.1
+
+BUGS:
+
+* server: `PLUGIN_UNIX_SOCKET_DIR` is consistently used for gRPC broker sockets as well as the initial socket [[GH-277](https://github.com/hashicorp/go-plugin/pull/277)]
+
+ENHANCEMENTS:
+
+* client: New `UnixSocketConfig` option in `ClientConfig` to support making the client's Unix sockets group-writable [[GH-277](https://github.com/hashicorp/go-plugin/pull/277)]
+
+## v1.5.0
+
+ENHANCEMENTS:
+
+* client: New `runner.Runner` interface to support clients providing custom plugin command runner implementations [[GH-270](https://github.com/hashicorp/go-plugin/pull/270)]
+ * Accessible via new `ClientConfig` field `RunnerFunc`, which is mutually exclusive with `Cmd` and `Reattach`
+ * Reattaching support via `ReattachConfig` field `ReattachFunc`
+* client: New `ClientConfig` field `SkipHostEnv` allows omitting the client process' own environment variables from the plugin command's environment [[GH-270](https://github.com/hashicorp/go-plugin/pull/270)]
+* client: Add `ID()` method to `Client` for retrieving the pid or other unique ID of a running plugin [[GH-272](https://github.com/hashicorp/go-plugin/pull/272)]
+* server: Support setting the directory to create Unix sockets in with the env var `PLUGIN_UNIX_SOCKET_DIR` [[GH-270](https://github.com/hashicorp/go-plugin/pull/270)]
+* server: Support setting group write permission and a custom group name or gid owner with the env var `PLUGIN_UNIX_SOCKET_GROUP` [[GH-270](https://github.com/hashicorp/go-plugin/pull/270)]
+
+## v1.4.11-rc1
+
+ENHANCEMENTS:
+
+* deps: bump protoreflect to v1.15.1 [[GH-264](https://github.com/hashicorp/go-plugin/pull/264)]
+
+## v1.4.10
+
+BUG FIXES:
+
+* additional notes: ensure to close files [[GH-241](https://github.com/hashicorp/go-plugin/pull/241)]
+
+ENHANCEMENTS:
+
+* deps: Remove direct dependency on golang.org/x/net [[GH-240](https://github.com/hashicorp/go-plugin/pull/240)]
+
+## v1.4.9
+
+ENHANCEMENTS:
+
+* client: Remove log warning introduced in 1.4.5 when SecureConfig is nil. [[GH-238](https://github.com/hashicorp/go-plugin/pull/238)]
+
+## v1.4.8
+
+BUG FIXES:
+
+* Fix windows build: [[GH-227](https://github.com/hashicorp/go-plugin/pull/227)]
+
+## v1.4.7
+
+ENHANCEMENTS:
+
+* More detailed error message on plugin start failure: [[GH-223](https://github.com/hashicorp/go-plugin/pull/223)]
+
+## v1.4.6
+
+BUG FIXES:
+
+* server: Prevent gRPC broker goroutine leak when using `GRPCServer` type `GracefulStop()` or `Stop()` methods [[GH-220](https://github.com/hashicorp/go-plugin/pull/220)]
+
+## v1.4.5
+
+ENHANCEMENTS:
+
+* client: log warning when SecureConfig is nil [[GH-207](https://github.com/hashicorp/go-plugin/pull/207)]
+
+
+## v1.4.4
+
+ENHANCEMENTS:
+
+* client: increase level of plugin exit logs [[GH-195](https://github.com/hashicorp/go-plugin/pull/195)]
+
+BUG FIXES:
+
+* Bidirectional communication: fix bidirectional communication when AutoMTLS is enabled [[GH-193](https://github.com/hashicorp/go-plugin/pull/193)]
+* RPC: Trim a spurious log message for plugins using RPC [[GH-186](https://github.com/hashicorp/go-plugin/pull/186)]
diff --git a/vendor/github.com/hashicorp/go-plugin/LICENSE b/vendor/github.com/hashicorp/go-plugin/LICENSE
new file mode 100644
index 00000000..042324fb
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/LICENSE
@@ -0,0 +1,355 @@
+Copyright (c) 2016 HashiCorp, Inc.
+
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+  Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+  means an individual or a legal entity exercising rights under this
+  License. For legal entities, “You” includes any entity that controls, is
+  controlled by, or is under common control with You. For purposes of this
+  definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+  b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+  restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+     the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md
new file mode 100644
index 00000000..50baee06
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/README.md
@@ -0,0 +1,165 @@
+# Go Plugin System over RPC
+
+`go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system
+that has been in use by HashiCorp tooling for over 4 years. While initially
+created for [Packer](https://www.packer.io), it is additionally in use by
+[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io),
+[Vault](https://www.vaultproject.io),
+[Boundary](https://www.boundaryproject.io),
+and [Waypoint](https://www.waypointproject.io).
+
+While the plugin system is over RPC, it is currently only designed to work
+over a local [reliable] network. Plugins over a real network are not supported
+and will lead to unexpected behavior.
+
+This plugin system has been used on millions of machines across many different
+projects and has proven to be battle hardened and ready for production use.
+
+## Features
+
+The HashiCorp plugin system supports a number of features:
+
+**Plugins are Go interface implementations.** This makes writing and consuming
+plugins feel very natural. To a plugin author: you just implement an
+interface as if it were going to run in the same process. For a plugin user:
+you just use and call functions on an interface as if it were in the same
+process. This plugin system handles the communication in between.
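+
+As a rough sketch of what that looks like in practice (the `Greeter` interface
+and the RPC client/server pair below are illustrative examples, not part of
+this library), both sides program against an ordinary Go interface:
+
+```go
+package shared
+
+import (
+	"net/rpc"
+
+	"github.com/hashicorp/go-plugin"
+)
+
+// Greeter is the shared interface both the host and the plugin compile against.
+type Greeter interface {
+	Greet() string
+}
+
+// GreeterRPC is what the host receives from Dispense; it forwards calls over net/rpc.
+type GreeterRPC struct{ client *rpc.Client }
+
+func (g *GreeterRPC) Greet() string {
+	var resp string
+	// Error handling is omitted for brevity.
+	g.client.Call("Plugin.Greet", new(interface{}), &resp)
+	return resp
+}
+
+// GreeterRPCServer runs inside the plugin process and wraps a real implementation.
+type GreeterRPCServer struct{ Impl Greeter }
+
+func (s *GreeterRPCServer) Greet(args interface{}, resp *string) error {
+	*resp = s.Impl.Greet()
+	return nil
+}
+
+// GreeterPlugin is the plugin.Plugin implementation tying the two halves together.
+type GreeterPlugin struct{ Impl Greeter }
+
+func (p *GreeterPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
+	return &GreeterRPCServer{Impl: p.Impl}, nil
+}
+
+func (p *GreeterPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
+	return &GreeterRPC{client: c}, nil
+}
+```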
+
+**Cross-language support.** Plugins can be written (and consumed) by
+almost every major language. This library supports serving plugins via
+[gRPC](http://www.grpc.io). gRPC-based plugins enable plugins to be written
+in any language.
+
+**Complex arguments and return values are supported.** This library
+provides APIs for handling complex arguments and return values such
+as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library
+(`MuxBroker`) for creating new connections between the client/server to
+serve additional interfaces or transfer raw data.
+
+**Bidirectional communication.** Because the plugin system supports
+complex arguments, the host process can send it interface implementations
+and the plugin can call back into the host process.
+
+**Built-in Logging.** Any plugins that use the `log` standard library
+will have log data automatically sent to the host process. The host
+process will mirror this output prefixed with the path to the plugin
+binary. This makes debugging with plugins simple. If the host system
+uses [hclog](https://github.com/hashicorp/go-hclog) then the log data
+will be structured. If the plugin also uses hclog, logs from the plugin
+will be sent to the host hclog and be structured.
+
+**Protocol Versioning.** A very basic "protocol version" is supported that
+can be incremented to invalidate any previous plugins. This is useful when
+interface signatures are changing, protocol level changes are necessary,
+etc. When a protocol version is incompatible, a human friendly error
+message is shown to the end user.
+
+**Stdout/Stderr Syncing.** While plugins are subprocesses, they can continue
+to use stdout/stderr as usual and the output will get mirrored back to
+the host process. The host process can control which `io.Writer` these
+streams are written to, and can use that to prevent this mirroring if desired.
+
+**TTY Preservation.** Plugin subprocesses are connected to the identical
+stdin file descriptor as the host process, allowing software that requires
+a TTY to work. For example, a plugin can execute `ssh` and even though there
+are multiple subprocesses and RPC happening, it will look and act perfectly
+to the end user.
+
+**Host upgrade while a plugin is running.** Plugins can be "reattached"
+so that the host process can be upgraded while the plugin is still running.
+This requires the host/plugin to know this is possible and daemonize
+properly. `NewClient` takes a `ReattachConfig` to determine if and how to
+reattach.
+
+**Cryptographically Secure Plugins.** Plugins can be verified with an expected
+checksum and RPC communications can be configured to use TLS. The host process
+must be properly secured to protect this configuration.
+
+## Architecture
+
+The HashiCorp plugin system works by launching subprocesses and communicating
+over RPC (using standard `net/rpc` or [gRPC](http://www.grpc.io)). A single
+connection is made between any plugin and the host process. For net/rpc-based
+plugins, we use a [connection multiplexing](https://github.com/hashicorp/yamux)
+library to multiplex any other connections on top. For gRPC-based plugins,
+the HTTP2 protocol handles multiplexing.
+
+This architecture has a number of benefits:
+
+ * Plugins can't crash your host process: A panic in a plugin doesn't
+ panic the plugin user.
+
+ * Plugins are very easy to write: just write a Go application and `go build`.
+ Or use any other language to write a gRPC server with a tiny amount of
+ boilerplate to support go-plugin.
+
+ * Plugins are very easy to install: just put the binary in a location where
+ the host will find it (depends on the host but this library also provides
+ helpers), and the plugin host handles the rest.
+
+ * Plugins can be relatively secure: The plugin only has access to the
+ interfaces and args given to it, not to the entire memory space of the
+ process. Additionally, go-plugin can communicate with the plugin over
+ TLS.
+
+## Usage
+
+To use the plugin system, you must take the following steps. These are
+high-level steps that must be done. Examples are available in the
+`examples/` directory.
+
+ 1. Choose the interface(s) you want to expose for plugins.
+
+ 2. For each interface, write an implementation of that interface that
+    communicates over a `net/rpc` connection, a [gRPC](http://www.grpc.io)
+    connection, or both. You'll need to write both a client and a server
+    implementation.
+
+ 3. Create a `Plugin` implementation that knows how to create the RPC
+ client/server for a given plugin type.
+
+ 4. Plugin authors call `plugin.Serve` to serve a plugin from the
+ `main` function.
+
+ 5. Plugin users use `plugin.Client` to launch a subprocess and request
+ an interface implementation over RPC.
+
+That's it! In practice, step 2 is the most tedious and time consuming step.
+Even so, it isn't very difficult and you can see examples in the `examples/`
+directory as well as throughout our various open source projects.
+
+For complete API documentation, see [GoDoc](https://godoc.org/github.com/hashicorp/go-plugin).
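+
+As a hedged, minimal sketch of steps 4 and 5, reusing the hypothetical
+`Greeter`/`GreeterPlugin` types from the earlier sketch (the handshake values
+and binary path below are made up for illustration), the plugin binary serves
+the implementation:
+
+```go
+// main.go of the plugin binary (step 4).
+// Greeter and GreeterPlugin come from the shared package sketched above.
+package main
+
+import "github.com/hashicorp/go-plugin"
+
+// helloGreeter is a trivial concrete implementation of the shared Greeter interface.
+type helloGreeter struct{}
+
+func (helloGreeter) Greet() string { return "hello from the plugin" }
+
+func main() {
+	plugin.Serve(&plugin.ServeConfig{
+		HandshakeConfig: plugin.HandshakeConfig{
+			ProtocolVersion:  1,
+			MagicCookieKey:   "BASIC_PLUGIN",
+			MagicCookieValue: "hello",
+		},
+		Plugins: plugin.PluginSet{
+			"greeter": &GreeterPlugin{Impl: helloGreeter{}},
+		},
+	})
+}
+```
+
+and the host process launches that binary and dispenses the interface (step 5):
+
+```go
+// Inside the host process (step 5); the exec.Command path is illustrative.
+client := plugin.NewClient(&plugin.ClientConfig{
+	HandshakeConfig: plugin.HandshakeConfig{
+		ProtocolVersion:  1,
+		MagicCookieKey:   "BASIC_PLUGIN",
+		MagicCookieValue: "hello",
+	},
+	Plugins: plugin.PluginSet{"greeter": &GreeterPlugin{}},
+	Cmd:     exec.Command("./greeter-plugin"),
+})
+defer client.Kill()
+
+rpcClient, err := client.Client()
+if err != nil {
+	log.Fatal(err)
+}
+raw, err := rpcClient.Dispense("greeter")
+if err != nil {
+	log.Fatal(err)
+}
+greeter := raw.(Greeter)
+fmt.Println(greeter.Greet())
+```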
+
+## Roadmap
+
+Our plugin system is constantly evolving. As we use the plugin system for
+new projects or for new features in existing projects, we constantly find
+improvements we can make.
+
+At this point in time, the roadmap for the plugin system is:
+
+**Semantic Versioning.** Plugins will be able to implement a semantic version.
+This plugin system will give host processes a system for constraining
+versions. This is in addition to the protocol versioning already present
+which is more for larger underlying changes.
+
+## What About Shared Libraries?
+
+When we started using plugins (late 2012, early 2013), plugins over RPC
+were the only option since Go didn't support dynamic library loading. Today,
+Go supports the [plugin](https://golang.org/pkg/plugin/) standard library with
+a number of limitations. Since 2012, our plugin system has been hardened by
+tens of millions of users, and has many benefits we've come to
+value greatly.
+
+For example, we use this plugin system in
+[Vault](https://www.vaultproject.io) where dynamic library loading is
+not acceptable for security reasons. That is an extreme
+example, but we believe our plugin system has more upsides than downsides
+compared to dynamic library loading, and since we've had it built and tested
+for years, we'll continue to use it.
+
+Shared libraries have one major advantage over our system: much higher
+performance. In real-world scenarios across our various tools, however, we've
+never needed more performance than our plugin system provides, and it has seen
+very high throughput, so this isn't a concern for us at the moment.
diff --git a/vendor/github.com/hashicorp/go-plugin/buf.gen.yaml b/vendor/github.com/hashicorp/go-plugin/buf.gen.yaml
new file mode 100644
index 00000000..033d0153
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/buf.gen.yaml
@@ -0,0 +1,14 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+version: v1
+plugins:
+ - plugin: buf.build/protocolbuffers/go
+ out: .
+ opt:
+ - paths=source_relative
+ - plugin: buf.build/grpc/go:v1.3.0
+ out: .
+ opt:
+ - paths=source_relative
+ - require_unimplemented_servers=false
diff --git a/vendor/github.com/hashicorp/go-plugin/buf.yaml b/vendor/github.com/hashicorp/go-plugin/buf.yaml
new file mode 100644
index 00000000..3d0da4c7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/buf.yaml
@@ -0,0 +1,7 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+version: v1
+build:
+ excludes:
+ - examples/
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go
new file mode 100644
index 00000000..73f6b351
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/client.go
@@ -0,0 +1,1239 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+ "bufio"
+ "context"
+ "crypto/subtle"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "io/ioutil"
+ "net"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/hashicorp/go-hclog"
+ "github.com/hashicorp/go-plugin/internal/cmdrunner"
+ "github.com/hashicorp/go-plugin/internal/grpcmux"
+ "github.com/hashicorp/go-plugin/runner"
+ "google.golang.org/grpc"
+)
+
+// If this is 1, then we've called CleanupClients. This can be used
+// by plugin RPC implementations to change error behavior since you
+// can expect network connection errors at this point. This should be
+// read by using sync/atomic.
+var Killed uint32 = 0
+
+// This is a slice of the "managed" clients which are cleaned up when
+// calling Cleanup
+var managedClients = make([]*Client, 0, 5)
+var managedClientsLock sync.Mutex
+
+// Error types
+var (
+ // ErrProcessNotFound is returned when a client is instantiated to
+ // reattach to an existing process and it isn't found.
+ ErrProcessNotFound = cmdrunner.ErrProcessNotFound
+
+	// ErrChecksumsDoNotMatch is returned when the binary's checksum doesn't match
+ // the one provided in the SecureConfig.
+ ErrChecksumsDoNotMatch = errors.New("checksums did not match")
+
+	// ErrSecureConfigNoChecksum is returned when an empty checksum is provided to the
+ // SecureConfig.
+ ErrSecureConfigNoChecksum = errors.New("no checksum provided")
+
+	// ErrSecureConfigNoHash is returned when a nil Hash object is provided to the
+ // SecureConfig.
+ ErrSecureConfigNoHash = errors.New("no hash implementation provided")
+
+ // ErrSecureConfigAndReattach is returned when both Reattach and
+ // SecureConfig are set.
+ ErrSecureConfigAndReattach = errors.New("only one of Reattach or SecureConfig can be set")
+
+ // ErrGRPCBrokerMuxNotSupported is returned when the client requests
+ // multiplexing over the gRPC broker, but the plugin does not support the
+ // feature. In most cases, this should be resolvable by updating and
+ // rebuilding the plugin, or restarting the plugin with
+ // ClientConfig.GRPCBrokerMultiplex set to false.
+ ErrGRPCBrokerMuxNotSupported = errors.New("client requested gRPC broker multiplexing but plugin does not support the feature")
+)
+
+// defaultPluginLogBufferSize is the default size of the buffer used to read from stderr for plugin log lines.
+const defaultPluginLogBufferSize = 64 * 1024
+
+// Client handles the lifecycle of a plugin application. It launches
+// plugins, connects to them, dispenses interface implementations, and handles
+// killing the process.
+//
+// Plugin hosts should use one Client for each plugin executable. To
+// dispense a plugin type, use the `Client.Client` function, and then
+// call `Dispense`. This awkward API is mostly historical but is used to split
+// the client that deals with subprocess management and the client that
+// does RPC management.
+//
+// See NewClient and ClientConfig for using a Client.
+type Client struct {
+ config *ClientConfig
+ exited bool
+ l sync.Mutex
+ address net.Addr
+ runner runner.AttachedRunner
+ client ClientProtocol
+ protocol Protocol
+ logger hclog.Logger
+ doneCtx context.Context
+ ctxCancel context.CancelFunc
+ negotiatedVersion int
+
+ // clientWaitGroup is used to manage the lifecycle of the plugin management
+ // goroutines.
+ clientWaitGroup sync.WaitGroup
+
+ // stderrWaitGroup is used to prevent the command's Wait() function from
+ // being called before we've finished reading from the stderr pipe.
+ stderrWaitGroup sync.WaitGroup
+
+ // processKilled is used for testing only, to flag when the process was
+ // forcefully killed.
+ processKilled bool
+
+ unixSocketCfg UnixSocketConfig
+
+ grpcMuxerOnce sync.Once
+ grpcMuxer *grpcmux.GRPCClientMuxer
+}
+
+// NegotiatedVersion returns the protocol version negotiated with the server.
+// This is only valid after Start() is called.
+func (c *Client) NegotiatedVersion() int {
+ return c.negotiatedVersion
+}
+
+// ID returns a unique ID for the running plugin. By default this is the process
+// ID (pid), but it could take other forms if RunnerFunc was provided.
+func (c *Client) ID() string {
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ if c.runner != nil {
+ return c.runner.ID()
+ }
+
+ return ""
+}
+
+// ClientConfig is the configuration used to initialize a new
+// plugin client. After being used to initialize a plugin client,
+// that configuration must not be modified again.
+type ClientConfig struct {
+ // HandshakeConfig is the configuration that must match servers.
+ HandshakeConfig
+
+ // Plugins are the plugins that can be consumed.
+ // The implied version of this PluginSet is the Handshake.ProtocolVersion.
+ Plugins PluginSet
+
+ // VersionedPlugins is a map of PluginSets for specific protocol versions.
+ // These can be used to negotiate a compatible version between client and
+ // server. If this is set, Handshake.ProtocolVersion is not required.
+ VersionedPlugins map[int]PluginSet
+
+ // One of the following must be set, but not both.
+ //
+ // Cmd is the unstarted subprocess for starting the plugin. If this is
+ // set, then the Client starts the plugin process on its own and connects
+ // to it.
+ //
+ // Reattach is configuration for reattaching to an existing plugin process
+ // that is already running. This isn't common.
+ Cmd *exec.Cmd
+ Reattach *ReattachConfig
+
+ // RunnerFunc allows consumers to provide their own implementation of
+ // runner.Runner and control the context within which a plugin is executed.
+ // The cmd argument will have been copied from the config and populated with
+ // environment variables that a go-plugin server expects to read such as
+ // AutoMTLS certs and the magic cookie key.
+ RunnerFunc func(l hclog.Logger, cmd *exec.Cmd, tmpDir string) (runner.Runner, error)
+
+ // SecureConfig is configuration for verifying the integrity of the
+ // executable. It can not be used with Reattach.
+ SecureConfig *SecureConfig
+
+ // TLSConfig is used to enable TLS on the RPC client.
+ TLSConfig *tls.Config
+
+ // Managed represents if the client should be managed by the
+ // plugin package or not. If true, then by calling CleanupClients,
+ // it will automatically be cleaned up. Otherwise, the client
+ // user is fully responsible for making sure to Kill all plugin
+ // clients. By default the client is _not_ managed.
+ Managed bool
+
+ // The minimum and maximum port to use for communicating with
+ // the subprocess. If not set, this defaults to 10,000 and 25,000
+ // respectively.
+ MinPort, MaxPort uint
+
+ // StartTimeout is the timeout to wait for the plugin to say it
+ // has started successfully.
+ StartTimeout time.Duration
+
+ // If non-nil, then the stderr of the client will be written to here
+ // (as well as the log). This is the original os.Stderr of the subprocess.
+ // This isn't the output of synced stderr.
+ Stderr io.Writer
+
+ // SyncStdout, SyncStderr can be set to override the
+ // respective os.Std* values in the plugin. Care should be taken to
+ // avoid races here. If these are nil, then this will be set to
+ // ioutil.Discard.
+ SyncStdout io.Writer
+ SyncStderr io.Writer
+
+ // AllowedProtocols is a list of allowed protocols. If this isn't set,
+ // then only netrpc is allowed. This is so that older go-plugin systems
+ // can show friendly errors if they see a plugin with an unknown
+ // protocol.
+ //
+ // By setting this, you can cause an error immediately on plugin start
+ // if an unsupported protocol is used with a good error message.
+ //
+ // If this isn't set at all (nil value), then only net/rpc is accepted.
+ // This is done for legacy reasons. You must explicitly opt-in to
+ // new protocols.
+ AllowedProtocols []Protocol
+
+	// Logger is the logger that the client will use. If none is provided,
+ // it will default to hclog's default logger.
+ Logger hclog.Logger
+
+ // PluginLogBufferSize is the buffer size(bytes) to read from stderr for plugin log lines.
+ // If this is 0, then the default of 64KB is used.
+ PluginLogBufferSize int
+
+ // AutoMTLS has the client and server automatically negotiate mTLS for
+ // transport authentication. This ensures that only the original client will
+ // be allowed to connect to the server, and all other connections will be
+ // rejected. The client will also refuse to connect to any server that isn't
+ // the original instance started by the client.
+ //
+ // In this mode of operation, the client generates a one-time use tls
+ // certificate, sends the public x.509 certificate to the new server, and
+ // the server generates a one-time use tls certificate, and sends the public
+ // x.509 certificate back to the client. These are used to authenticate all
+ // rpc connections between the client and server.
+ //
+ // Setting AutoMTLS to true implies that the server must support the
+ // protocol, and correctly negotiate the tls certificates, or a connection
+ // failure will result.
+ //
+ // The client should not set TLSConfig, nor should the server set a
+ // TLSProvider, because AutoMTLS implies that a new certificate and tls
+ // configuration will be generated at startup.
+ //
+ // You cannot Reattach to a server with this option enabled.
+ AutoMTLS bool
+
+ // GRPCDialOptions allows plugin users to pass custom grpc.DialOption
+ // to create gRPC connections. This only affects plugins using the gRPC
+ // protocol.
+ GRPCDialOptions []grpc.DialOption
+
+ // GRPCBrokerMultiplex turns on multiplexing for the gRPC broker. The gRPC
+ // broker will multiplex all brokered gRPC servers over the plugin's original
+ // listener socket instead of making a new listener for each server. The
+ // go-plugin library currently only includes a Go implementation for the
+ // server (i.e. plugin) side of gRPC broker multiplexing.
+ //
+ // Does not support reattaching.
+ //
+ // Multiplexed gRPC streams MUST be established sequentially, i.e. after
+ // calling AcceptAndServe from one side, wait for the other side to Dial
+ // before calling AcceptAndServe again.
+ GRPCBrokerMultiplex bool
+
+ // SkipHostEnv allows plugins to run without inheriting the parent process'
+ // environment variables.
+ SkipHostEnv bool
+
+ // UnixSocketConfig configures additional options for any Unix sockets
+ // that are created. Not normally required. Not supported on Windows.
+ UnixSocketConfig *UnixSocketConfig
+}
+
+type UnixSocketConfig struct {
+ // If set, go-plugin will change the owner of any Unix sockets created to
+ // this group, and set them as group-writable. Can be a name or gid. The
+ // client process must be a member of this group or chown will fail.
+ Group string
+
+ // TempDir specifies the base directory to use when creating a plugin-specific
+ // temporary directory. It is expected to already exist and be writable. If
+ // not set, defaults to the directory chosen by os.MkdirTemp.
+ TempDir string
+
+ // The directory to create Unix sockets in. Internally created and managed
+ // by go-plugin and deleted when the plugin is killed. Will be created
+ // inside TempDir if specified.
+ socketDir string
+}
+
+// ReattachConfig is used to configure a client to reattach to an
+// already-running plugin process. You can retrieve this information by
+// calling ReattachConfig on Client.
+type ReattachConfig struct {
+ Protocol Protocol
+ ProtocolVersion int
+ Addr net.Addr
+ Pid int
+
+ // ReattachFunc allows consumers to provide their own implementation of
+ // runner.AttachedRunner and attach to something other than a plain process.
+ // At least one of Pid or ReattachFunc must be set.
+ ReattachFunc runner.ReattachFunc
+
+	// Test is set to true if this is reattaching to a plugin in "test mode"
+ // (see ServeConfig.Test). In this mode, client.Kill will NOT kill the
+ // process and instead will rely on the plugin to terminate itself. This
+ // should not be used in non-test environments.
+ Test bool
+}
+
+// SecureConfig is used to configure a client to verify the integrity of an
+// executable before running. It does this by verifying the checksum is
+// expected. Hash is used to specify the hashing method to use when checksumming
+// the file. The configuration is verified by the client by calling the
+// SecureConfig.Check() function.
+//
+// The host process should ensure the checksum was provided by a trusted and
+// authoritative source. The binary should be installed in such a way that it
+// can not be modified by an unauthorized user between the time of this check
+// and the time of execution.
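+//
+// A minimal usage sketch (trustedChecksumHex is a placeholder for a checksum
+// obtained from a trusted source, such as a signed SHA256SUMS file):
+//
+//	sum, err := hex.DecodeString(trustedChecksumHex)
+//	if err != nil {
+//		return err
+//	}
+//	secureConfig := &SecureConfig{
+//		Checksum: sum,
+//		Hash:     sha256.New(),
+//	}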
+type SecureConfig struct {
+ Checksum []byte
+ Hash hash.Hash
+}
+
+// Check takes the filepath to an executable and returns true if the checksum of
+// the file matches the checksum provided in the SecureConfig.
+func (s *SecureConfig) Check(filePath string) (bool, error) {
+ if len(s.Checksum) == 0 {
+ return false, ErrSecureConfigNoChecksum
+ }
+
+ if s.Hash == nil {
+ return false, ErrSecureConfigNoHash
+ }
+
+ file, err := os.Open(filePath)
+ if err != nil {
+ return false, err
+ }
+ defer file.Close()
+
+ _, err = io.Copy(s.Hash, file)
+ if err != nil {
+ return false, err
+ }
+
+ sum := s.Hash.Sum(nil)
+
+ return subtle.ConstantTimeCompare(sum, s.Checksum) == 1, nil
+}
+
+// This makes sure all the managed subprocesses are killed and properly
+// logged. This should be called before the parent process running the
+// plugins exits.
+//
+// This must only be called _once_.
+func CleanupClients() {
+ // Set the killed to true so that we don't get unexpected panics
+ atomic.StoreUint32(&Killed, 1)
+
+ // Kill all the managed clients in parallel and use a WaitGroup
+ // to wait for them all to finish up.
+ var wg sync.WaitGroup
+ managedClientsLock.Lock()
+ for _, client := range managedClients {
+ wg.Add(1)
+
+ go func(client *Client) {
+ client.Kill()
+ wg.Done()
+ }(client)
+ }
+ managedClientsLock.Unlock()
+
+ wg.Wait()
+}
+
+// NewClient creates a new plugin client which manages the lifecycle of an external
+// plugin and gets the address for the RPC connection.
+//
+// The client must be cleaned up at some point by calling Kill(). If
+// the client is a managed client (created with ClientConfig.Managed) you
+// can just call CleanupClients at the end of your program and they will
+// be properly cleaned.
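+//
+// A minimal sketch of the managed pattern (the handshake values, plugin name,
+// ExamplePlugin type, and binary path are all illustrative):
+//
+//	client := NewClient(&ClientConfig{
+//		HandshakeConfig: HandshakeConfig{ProtocolVersion: 1, MagicCookieKey: "KEY", MagicCookieValue: "VALUE"},
+//		Plugins:         PluginSet{"example": &ExamplePlugin{}},
+//		Cmd:             exec.Command("./example-plugin"),
+//		Managed:         true,
+//	})
+//	defer CleanupClients()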
+func NewClient(config *ClientConfig) (c *Client) {
+ if config.MinPort == 0 && config.MaxPort == 0 {
+ config.MinPort = 10000
+ config.MaxPort = 25000
+ }
+
+ if config.StartTimeout == 0 {
+ config.StartTimeout = 1 * time.Minute
+ }
+
+ if config.Stderr == nil {
+ config.Stderr = ioutil.Discard
+ }
+
+ if config.SyncStdout == nil {
+ config.SyncStdout = io.Discard
+ }
+ if config.SyncStderr == nil {
+ config.SyncStderr = io.Discard
+ }
+
+ if config.AllowedProtocols == nil {
+ config.AllowedProtocols = []Protocol{ProtocolNetRPC}
+ }
+
+ if config.Logger == nil {
+ config.Logger = hclog.New(&hclog.LoggerOptions{
+ Output: hclog.DefaultOutput,
+ Level: hclog.Trace,
+ Name: "plugin",
+ })
+ }
+
+ if config.PluginLogBufferSize == 0 {
+ config.PluginLogBufferSize = defaultPluginLogBufferSize
+ }
+
+ c = &Client{
+ config: config,
+ logger: config.Logger,
+ }
+ if config.Managed {
+ managedClientsLock.Lock()
+ managedClients = append(managedClients, c)
+ managedClientsLock.Unlock()
+ }
+
+ return
+}
+
+// Client returns the protocol client for this connection.
+//
+// Subsequent calls to this will return the same client.
+func (c *Client) Client() (ClientProtocol, error) {
+ _, err := c.Start()
+ if err != nil {
+ return nil, err
+ }
+
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ if c.client != nil {
+ return c.client, nil
+ }
+
+ switch c.protocol {
+ case ProtocolNetRPC:
+ c.client, err = newRPCClient(c)
+
+ case ProtocolGRPC:
+ c.client, err = newGRPCClient(c.doneCtx, c)
+
+ default:
+ return nil, fmt.Errorf("unknown server protocol: %s", c.protocol)
+ }
+
+ if err != nil {
+ c.client = nil
+ return nil, err
+ }
+
+ return c.client, nil
+}
+
+// Tells whether or not the underlying process has exited.
+func (c *Client) Exited() bool {
+ c.l.Lock()
+ defer c.l.Unlock()
+ return c.exited
+}
+
+// killed is used in tests to check if a process failed to exit gracefully, and
+// needed to be killed.
+func (c *Client) killed() bool {
+ c.l.Lock()
+ defer c.l.Unlock()
+ return c.processKilled
+}
+
+// End the executing subprocess (if it is running) and perform any cleanup
+// tasks necessary such as capturing any remaining logs and so on.
+//
+// This method blocks until the process successfully exits.
+//
+// This method can safely be called multiple times.
+func (c *Client) Kill() {
+ // Grab a lock to read some private fields.
+ c.l.Lock()
+ runner := c.runner
+ addr := c.address
+ hostSocketDir := c.unixSocketCfg.socketDir
+ c.l.Unlock()
+
+ // If there is no runner or ID, there is nothing to kill.
+ if runner == nil || runner.ID() == "" {
+ return
+ }
+
+ defer func() {
+		// Wait for all client goroutines to finish.
+ c.clientWaitGroup.Wait()
+
+ if hostSocketDir != "" {
+ os.RemoveAll(hostSocketDir)
+ }
+
+ // Make sure there is no reference to the old process after it has been
+ // killed.
+ c.l.Lock()
+ c.runner = nil
+ c.l.Unlock()
+ }()
+
+ // We need to check for address here. It is possible that the plugin
+ // started (process != nil) but has no address (addr == nil) if the
+ // plugin failed at startup. If we do have an address, we need to close
+ // the plugin net connections.
+ graceful := false
+ if addr != nil {
+ // Close the client to cleanly exit the process.
+ client, err := c.Client()
+ if err == nil {
+ err = client.Close()
+
+ // If there is no error, then we attempt to wait for a graceful
+ // exit. If there was an error, we assume that graceful cleanup
+ // won't happen and just force kill.
+ graceful = err == nil
+ if err != nil {
+ // If there was an error just log it. We're going to force
+ // kill in a moment anyways.
+ c.logger.Warn("error closing client during Kill", "err", err)
+ }
+ } else {
+ c.logger.Error("client", "error", err)
+ }
+ }
+
+ // If we're attempting a graceful exit, then we wait for a short period
+ // of time to allow that to happen. To wait for this we just wait on the
+ // doneCh which would be closed if the process exits.
+ if graceful {
+ select {
+ case <-c.doneCtx.Done():
+ c.logger.Debug("plugin exited")
+ return
+ case <-time.After(2 * time.Second):
+ }
+ }
+
+ // If graceful exiting failed, just kill it
+ c.logger.Warn("plugin failed to exit gracefully")
+ if err := runner.Kill(context.Background()); err != nil {
+ c.logger.Debug("error killing plugin", "error", err)
+ }
+
+ c.l.Lock()
+ c.processKilled = true
+ c.l.Unlock()
+}
+
+// Start the underlying subprocess, communicating with it to negotiate
+// a port for RPC connections, and returning the address to connect via RPC.
+//
+// This method is safe to call multiple times. Subsequent calls have no effect.
+// Once a client has been started once, it cannot be started again, even if
+// it was killed.
+func (c *Client) Start() (addr net.Addr, err error) {
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ if c.address != nil {
+ return c.address, nil
+ }
+
+	// Exactly one of Cmd, Reattach, or RunnerFunc must be set. We wrap
+	// this in a {} for scoping reasons, in the hope that the escape
+	// analysis will pop the stack here.
+ {
+ var mutuallyExclusiveOptions int
+ if c.config.Cmd != nil {
+ mutuallyExclusiveOptions += 1
+ }
+ if c.config.Reattach != nil {
+ mutuallyExclusiveOptions += 1
+ }
+ if c.config.RunnerFunc != nil {
+ mutuallyExclusiveOptions += 1
+ }
+ if mutuallyExclusiveOptions != 1 {
+ return nil, fmt.Errorf("exactly one of Cmd, or Reattach, or RunnerFunc must be set")
+ }
+
+ if c.config.SecureConfig != nil && c.config.Reattach != nil {
+ return nil, ErrSecureConfigAndReattach
+ }
+
+ if c.config.GRPCBrokerMultiplex && c.config.Reattach != nil {
+ return nil, fmt.Errorf("gRPC broker multiplexing is not supported with Reattach config")
+ }
+ }
+
+ if c.config.Reattach != nil {
+ return c.reattach()
+ }
+
+ if c.config.VersionedPlugins == nil {
+ c.config.VersionedPlugins = make(map[int]PluginSet)
+ }
+
+ // handle all plugins as versioned, using the handshake config as the default.
+ version := int(c.config.ProtocolVersion)
+
+ // Make sure we're not overwriting a real version 0. If ProtocolVersion was
+ // non-zero, then we have to just assume the user made sure that
+ // VersionedPlugins doesn't conflict.
+ if _, ok := c.config.VersionedPlugins[version]; !ok && c.config.Plugins != nil {
+ c.config.VersionedPlugins[version] = c.config.Plugins
+ }
+
+ var versionStrings []string
+ for v := range c.config.VersionedPlugins {
+ versionStrings = append(versionStrings, strconv.Itoa(v))
+ }
+
+ env := []string{
+ fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue),
+ fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort),
+ fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort),
+ fmt.Sprintf("PLUGIN_PROTOCOL_VERSIONS=%s", strings.Join(versionStrings, ",")),
+ }
+ if c.config.GRPCBrokerMultiplex {
+ env = append(env, fmt.Sprintf("%s=true", envMultiplexGRPC))
+ }
+
+ cmd := c.config.Cmd
+ if cmd == nil {
+ // It's only possible to get here if RunnerFunc is non-nil, but we'll
+ // still use cmd as a spec to populate metadata for the external
+ // implementation to consume.
+ cmd = exec.Command("")
+ }
+ if !c.config.SkipHostEnv {
+ cmd.Env = append(cmd.Env, os.Environ()...)
+ }
+ cmd.Env = append(cmd.Env, env...)
+ cmd.Stdin = os.Stdin
+
+ if c.config.SecureConfig != nil {
+ if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil {
+ return nil, fmt.Errorf("error verifying checksum: %s", err)
+ } else if !ok {
+ return nil, ErrChecksumsDoNotMatch
+ }
+ }
+
+ // Setup a temporary certificate for client/server mtls, and send the public
+ // certificate to the plugin.
+ if c.config.AutoMTLS {
+ c.logger.Info("configuring client automatic mTLS")
+ certPEM, keyPEM, err := generateCert()
+ if err != nil {
+ c.logger.Error("failed to generate client certificate", "error", err)
+ return nil, err
+ }
+ cert, err := tls.X509KeyPair(certPEM, keyPEM)
+ if err != nil {
+ c.logger.Error("failed to parse client certificate", "error", err)
+ return nil, err
+ }
+
+ cmd.Env = append(cmd.Env, fmt.Sprintf("PLUGIN_CLIENT_CERT=%s", certPEM))
+
+ c.config.TLSConfig = &tls.Config{
+ Certificates: []tls.Certificate{cert},
+ ClientAuth: tls.RequireAndVerifyClientCert,
+ MinVersion: tls.VersionTLS12,
+ ServerName: "localhost",
+ }
+ }
+
+ if c.config.UnixSocketConfig != nil {
+ c.unixSocketCfg = *c.config.UnixSocketConfig
+ }
+
+ if c.unixSocketCfg.Group != "" {
+ cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", EnvUnixSocketGroup, c.unixSocketCfg.Group))
+ }
+
+ var runner runner.Runner
+ switch {
+ case c.config.RunnerFunc != nil:
+ c.unixSocketCfg.socketDir, err = os.MkdirTemp(c.unixSocketCfg.TempDir, "plugin-dir")
+ if err != nil {
+ return nil, err
+ }
+ // os.MkdirTemp creates folders with 0o700, so if we have a group
+ // configured we need to make it group-writable.
+ if c.unixSocketCfg.Group != "" {
+ err = setGroupWritable(c.unixSocketCfg.socketDir, c.unixSocketCfg.Group, 0o770)
+ if err != nil {
+ return nil, err
+ }
+ }
+ cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", EnvUnixSocketDir, c.unixSocketCfg.socketDir))
+ c.logger.Trace("created temporary directory for unix sockets", "dir", c.unixSocketCfg.socketDir)
+
+ runner, err = c.config.RunnerFunc(c.logger, cmd, c.unixSocketCfg.socketDir)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ runner, err = cmdrunner.NewCmdRunner(c.logger, cmd)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+
+ c.runner = runner
+ startCtx, startCtxCancel := context.WithTimeout(context.Background(), c.config.StartTimeout)
+ defer startCtxCancel()
+ err = runner.Start(startCtx)
+ if err != nil {
+ return nil, err
+ }
+
+ // Make sure the command is properly cleaned up if there is an error
+ defer func() {
+ rErr := recover()
+
+ if err != nil || rErr != nil {
+ runner.Kill(context.Background())
+ }
+
+ if rErr != nil {
+ panic(rErr)
+ }
+ }()
+
+ // Create a context for when we kill
+ c.doneCtx, c.ctxCancel = context.WithCancel(context.Background())
+
+ // Start goroutine that logs the stderr
+ c.clientWaitGroup.Add(1)
+ c.stderrWaitGroup.Add(1)
+ // logStderr calls Done()
+ go c.logStderr(runner.Name(), runner.Stderr())
+
+ c.clientWaitGroup.Add(1)
+ go func() {
+ // ensure the context is cancelled when we're done
+ defer c.ctxCancel()
+
+ defer c.clientWaitGroup.Done()
+
+ // wait to finish reading from stderr since the stderr pipe reader
+ // will be closed by the subsequent call to cmd.Wait().
+ c.stderrWaitGroup.Wait()
+
+ // Wait for the command to end.
+ err := runner.Wait(context.Background())
+ if err != nil {
+ c.logger.Error("plugin process exited", "plugin", runner.Name(), "id", runner.ID(), "error", err.Error())
+ } else {
+ // Log and make sure to flush the logs right away
+ c.logger.Info("plugin process exited", "plugin", runner.Name(), "id", runner.ID())
+ }
+
+ os.Stderr.Sync()
+
+ // Set that we exited, which takes a lock
+ c.l.Lock()
+ defer c.l.Unlock()
+ c.exited = true
+ }()
+
+ // Start a goroutine that is going to be reading the lines
+ // out of stdout
+ linesCh := make(chan string)
+ c.clientWaitGroup.Add(1)
+ go func() {
+ defer c.clientWaitGroup.Done()
+ defer close(linesCh)
+
+ scanner := bufio.NewScanner(runner.Stdout())
+ for scanner.Scan() {
+ linesCh <- scanner.Text()
+ }
+ if scanner.Err() != nil {
+ c.logger.Error("error encountered while scanning stdout", "error", scanner.Err())
+ }
+ }()
+
+ // Make sure after we exit we read the lines from stdout forever
+ // so they don't block since it is a pipe.
+ // The scanner goroutine above will close this, but track it with a wait
+ // group for completeness.
+ c.clientWaitGroup.Add(1)
+ defer func() {
+ go func() {
+ defer c.clientWaitGroup.Done()
+ for range linesCh {
+ }
+ }()
+ }()
+
+ // Some channels for the next step
+ timeout := time.After(c.config.StartTimeout)
+
+ // Start looking for the address
+ c.logger.Debug("waiting for RPC address", "plugin", runner.Name())
+ select {
+ case <-timeout:
+ err = errors.New("timeout while waiting for plugin to start")
+ case <-c.doneCtx.Done():
+ err = errors.New("plugin exited before we could connect")
+ case line, ok := <-linesCh:
+ // Trim the line and split by "|" in order to get the parts of
+ // the output.
+ line = strings.TrimSpace(line)
+ parts := strings.Split(line, "|")
+ if len(parts) < 4 {
+ errText := fmt.Sprintf("Unrecognized remote plugin message: %s", line)
+ if !ok {
+ errText += "\n" + "Failed to read any lines from plugin's stdout"
+ }
+ additionalNotes := runner.Diagnose(context.Background())
+ if additionalNotes != "" {
+ errText += "\n" + additionalNotes
+ }
+ err = errors.New(errText)
+ return
+ }
+
+ // Check the core protocol. Wrapped in a {} for scoping.
+ {
+ var coreProtocol int
+ coreProtocol, err = strconv.Atoi(parts[0])
+ if err != nil {
+ err = fmt.Errorf("Error parsing core protocol version: %s", err)
+ return
+ }
+
+ if coreProtocol != CoreProtocolVersion {
+ err = fmt.Errorf("Incompatible core API version with plugin. "+
+ "Plugin version: %s, Core version: %d\n\n"+
+ "To fix this, the plugin usually only needs to be recompiled.\n"+
+ "Please report this to the plugin author.", parts[0], CoreProtocolVersion)
+ return
+ }
+ }
+
+ // Test the API version
+ version, pluginSet, err := c.checkProtoVersion(parts[1])
+ if err != nil {
+ return addr, err
+ }
+
+ // set the Plugins value to the compatible set, so the version
+ // doesn't need to be passed through to the ClientProtocol
+ // implementation.
+ c.config.Plugins = pluginSet
+ c.negotiatedVersion = version
+ c.logger.Debug("using plugin", "version", version)
+
+ network, address, err := runner.PluginToHost(parts[2], parts[3])
+ if err != nil {
+ return addr, err
+ }
+
+ switch network {
+ case "tcp":
+ addr, err = net.ResolveTCPAddr("tcp", address)
+ case "unix":
+ addr, err = net.ResolveUnixAddr("unix", address)
+ default:
+ err = fmt.Errorf("Unknown address type: %s", address)
+ }
+
+ // If we have a server type, then record that. We default to net/rpc
+ // for backwards compatibility.
+ c.protocol = ProtocolNetRPC
+ if len(parts) >= 5 {
+ c.protocol = Protocol(parts[4])
+ }
+
+ found := false
+ for _, p := range c.config.AllowedProtocols {
+ if p == c.protocol {
+ found = true
+ break
+ }
+ }
+ if !found {
+ err = fmt.Errorf("Unsupported plugin protocol %q. Supported: %v",
+ c.protocol, c.config.AllowedProtocols)
+ return addr, err
+ }
+
+ // See if we have a TLS certificate from the server.
+ // Checking if the length is > 50 rules out catching the unused "extra"
+ // data returned from some older implementations.
+ if len(parts) >= 6 && len(parts[5]) > 50 {
+ err := c.loadServerCert(parts[5])
+ if err != nil {
+ return nil, fmt.Errorf("error parsing server cert: %s", err)
+ }
+ }
+
+ if c.config.GRPCBrokerMultiplex && c.protocol == ProtocolGRPC {
+ if len(parts) <= 6 {
+ return nil, fmt.Errorf("%w; for Go plugins, you will need to update the "+
+ "github.com/hashicorp/go-plugin dependency and recompile", ErrGRPCBrokerMuxNotSupported)
+ }
+ if muxSupported, err := strconv.ParseBool(parts[6]); err != nil {
+ return nil, fmt.Errorf("error parsing %q as a boolean for gRPC broker multiplexing support", parts[6])
+ } else if !muxSupported {
+ return nil, ErrGRPCBrokerMuxNotSupported
+ }
+ }
+ }
+
+ c.address = addr
+ return
+}
+
+// loadServerCert is used by AutoMTLS to read an x.509 cert returned by the
+// server, and load it as the RootCA and ClientCA for the client TLSConfig.
+func (c *Client) loadServerCert(cert string) error {
+ certPool := x509.NewCertPool()
+
+ asn1, err := base64.RawStdEncoding.DecodeString(cert)
+ if err != nil {
+ return err
+ }
+
+ x509Cert, err := x509.ParseCertificate([]byte(asn1))
+ if err != nil {
+ return err
+ }
+
+ certPool.AddCert(x509Cert)
+
+ c.config.TLSConfig.RootCAs = certPool
+ c.config.TLSConfig.ClientCAs = certPool
+ return nil
+}
+
+func (c *Client) reattach() (net.Addr, error) {
+ reattachFunc := c.config.Reattach.ReattachFunc
+ // For backwards compatibility default to cmdrunner.ReattachFunc
+ if reattachFunc == nil {
+ reattachFunc = cmdrunner.ReattachFunc(c.config.Reattach.Pid, c.config.Reattach.Addr)
+ }
+
+ r, err := reattachFunc()
+ if err != nil {
+ return nil, err
+ }
+
+ // Create a context for when we kill
+ c.doneCtx, c.ctxCancel = context.WithCancel(context.Background())
+
+ c.clientWaitGroup.Add(1)
+ // Goroutine to mark exit status
+ go func(r runner.AttachedRunner) {
+ defer c.clientWaitGroup.Done()
+
+ // ensure the context is cancelled when we're done
+ defer c.ctxCancel()
+
+ // Wait for the process to die
+ r.Wait(context.Background())
+
+ // Log so we can see it
+ c.logger.Debug("reattached plugin process exited")
+
+ // Mark it
+ c.l.Lock()
+ defer c.l.Unlock()
+ c.exited = true
+ }(r)
+
+ // Set the address and protocol
+ c.address = c.config.Reattach.Addr
+ c.protocol = c.config.Reattach.Protocol
+ if c.protocol == "" {
+ // Default the protocol to net/rpc for backwards compatibility
+ c.protocol = ProtocolNetRPC
+ }
+
+ if c.config.Reattach.Test {
+ c.negotiatedVersion = c.config.Reattach.ProtocolVersion
+ } else {
+ // If we're in test mode, we do NOT set the runner. This avoids the
+ // runner being killed (the only purpose we have for setting c.runner
+ // when reattaching), since in test mode the process is responsible for
+ // exiting on its own.
+ c.runner = r
+ }
+
+ return c.address, nil
+}
+
+// checkProtoVersion returns the negotiated version and PluginSet.
+// This returns an error if the server returned an incompatible protocol
+// version, or an invalid handshake response.
+func (c *Client) checkProtoVersion(protoVersion string) (int, PluginSet, error) {
+ serverVersion, err := strconv.Atoi(protoVersion)
+ if err != nil {
+ return 0, nil, fmt.Errorf("Error parsing protocol version %q: %s", protoVersion, err)
+ }
+
+ // record these for the error message
+ var clientVersions []int
+
+ // all versions, including the legacy ProtocolVersion have been added to
+ // the versions set
+ for version, plugins := range c.config.VersionedPlugins {
+ clientVersions = append(clientVersions, version)
+
+ if serverVersion != version {
+ continue
+ }
+ return version, plugins, nil
+ }
+
+ return 0, nil, fmt.Errorf("Incompatible API version with plugin. "+
+ "Plugin version: %d, Client versions: %d", serverVersion, clientVersions)
+}
+
+// ReattachConfig returns the information that must be provided to NewClient
+// to reattach to the plugin process that this client started. This is
+// useful for plugins that detach from their parent process.
+//
+// If this returns nil then the process hasn't been started yet. Please
+// call Start or Client before calling this.
+//
+// Clients who specified a RunnerFunc will need to populate their own
+// ReattachFunc in the returned ReattachConfig before it can be used.
+func (c *Client) ReattachConfig() *ReattachConfig {
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ if c.address == nil {
+ return nil
+ }
+
+ if c.config.Cmd != nil && c.config.Cmd.Process == nil {
+ return nil
+ }
+
+ // If we connected via reattach, just return the information as-is
+ if c.config.Reattach != nil {
+ return c.config.Reattach
+ }
+
+ reattach := &ReattachConfig{
+ Protocol: c.protocol,
+ Addr: c.address,
+ }
+
+ if c.config.Cmd != nil && c.config.Cmd.Process != nil {
+ reattach.Pid = c.config.Cmd.Process.Pid
+ }
+
+ return reattach
+}
+
+// Protocol returns the protocol of the server on the remote end. This will
+// start the plugin process if it isn't already started. Errors from
+// starting the plugin are suppressed and ProtocolInvalid is returned. It
+// is recommended you call Start explicitly before calling Protocol to ensure
+// no errors occur.
+func (c *Client) Protocol() Protocol {
+ _, err := c.Start()
+ if err != nil {
+ return ProtocolInvalid
+ }
+
+ return c.protocol
+}
+
+func netAddrDialer(addr net.Addr) func(string, time.Duration) (net.Conn, error) {
+ return func(_ string, _ time.Duration) (net.Conn, error) {
+ // Connect to the client
+ conn, err := net.Dial(addr.Network(), addr.String())
+ if err != nil {
+ return nil, err
+ }
+ if tcpConn, ok := conn.(*net.TCPConn); ok {
+ // Make sure to set keep alive so that the connection doesn't die
+ tcpConn.SetKeepAlive(true)
+ }
+
+ return conn, nil
+ }
+}
+
+// dialer is compatible with grpc.WithDialer and creates the connection
+// to the plugin.
+func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) {
+ muxer, err := c.getGRPCMuxer(c.address)
+ if err != nil {
+ return nil, err
+ }
+
+ var conn net.Conn
+ if muxer.Enabled() {
+ conn, err = muxer.Dial()
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ conn, err = netAddrDialer(c.address)("", timeout)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // If we have a TLS config we wrap our connection. We only do this
+ // for net/rpc since gRPC uses its own mechanism for TLS.
+ if c.protocol == ProtocolNetRPC && c.config.TLSConfig != nil {
+ conn = tls.Client(conn, c.config.TLSConfig)
+ }
+
+ return conn, nil
+}
+
+func (c *Client) getGRPCMuxer(addr net.Addr) (*grpcmux.GRPCClientMuxer, error) {
+ if c.protocol != ProtocolGRPC || !c.config.GRPCBrokerMultiplex {
+ return nil, nil
+ }
+
+ var err error
+ c.grpcMuxerOnce.Do(func() {
+ c.grpcMuxer, err = grpcmux.NewGRPCClientMuxer(c.logger, addr)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return c.grpcMuxer, nil
+}
+
+func (c *Client) logStderr(name string, r io.Reader) {
+ defer c.clientWaitGroup.Done()
+ defer c.stderrWaitGroup.Done()
+ l := c.logger.Named(filepath.Base(name))
+
+ reader := bufio.NewReaderSize(r, c.config.PluginLogBufferSize)
+ // continuation indicates the previous line was a prefix
+ continuation := false
+
+ for {
+ line, isPrefix, err := reader.ReadLine()
+ switch {
+ case err == io.EOF:
+ return
+ case err != nil:
+ l.Error("reading plugin stderr", "error", err)
+ return
+ }
+
+ c.config.Stderr.Write(line)
+
+ // The line was longer than our max token size, so it's likely
+ // incomplete and won't unmarshal.
+ if isPrefix || continuation {
+ l.Debug(string(line))
+
+ // if we're finishing a continued line, add the newline back in
+ if !isPrefix {
+ c.config.Stderr.Write([]byte{'\n'})
+ }
+
+ continuation = isPrefix
+ continue
+ }
+
+ c.config.Stderr.Write([]byte{'\n'})
+
+ entry, err := parseJSON(line)
+ // If output is not JSON format, print directly to Debug
+ if err != nil {
+ // Attempt to infer the desired log level from the commonly used
+ // string prefixes
+ switch line := string(line); {
+ case strings.HasPrefix(line, "[TRACE]"):
+ l.Trace(line)
+ case strings.HasPrefix(line, "[DEBUG]"):
+ l.Debug(line)
+ case strings.HasPrefix(line, "[INFO]"):
+ l.Info(line)
+ case strings.HasPrefix(line, "[WARN]"):
+ l.Warn(line)
+ case strings.HasPrefix(line, "[ERROR]"):
+ l.Error(line)
+ default:
+ l.Debug(line)
+ }
+ } else {
+ out := flattenKVPairs(entry.KVPairs)
+
+ out = append(out, "timestamp", entry.Timestamp.Format(hclog.TimeFormat))
+ switch hclog.LevelFromString(entry.Level) {
+ case hclog.Trace:
+ l.Trace(entry.Message, out...)
+ case hclog.Debug:
+ l.Debug(entry.Message, out...)
+ case hclog.Info:
+ l.Info(entry.Message, out...)
+ case hclog.Warn:
+ l.Warn(entry.Message, out...)
+ case hclog.Error:
+ l.Error(entry.Message, out...)
+ default:
+ // if there was no log level, it's likely this is unexpected
+ // json from something other than hclog, and we should output
+ // it verbatim.
+ l.Debug(string(line))
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/constants.go b/vendor/github.com/hashicorp/go-plugin/constants.go
new file mode 100644
index 00000000..e7f5bbe5
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/constants.go
@@ -0,0 +1,16 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+const (
+ // EnvUnixSocketDir specifies the directory that _plugins_ should create unix
+ // sockets in. Does not affect client behavior.
+ EnvUnixSocketDir = "PLUGIN_UNIX_SOCKET_DIR"
+
+ // EnvUnixSocketGroup specifies the owning, writable group to set for Unix
+ // sockets created by _plugins_. Does not affect client behavior.
+ EnvUnixSocketGroup = "PLUGIN_UNIX_SOCKET_GROUP"
+
+ envMultiplexGRPC = "PLUGIN_MULTIPLEX_GRPC"
+)
diff --git a/vendor/github.com/hashicorp/go-plugin/discover.go b/vendor/github.com/hashicorp/go-plugin/discover.go
new file mode 100644
index 00000000..c5b96242
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/discover.go
@@ -0,0 +1,31 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+ "path/filepath"
+)
+
+// Discover discovers plugins that are in a given directory.
+//
+// The directory doesn't need to be absolute. For example, "." will work fine.
+//
+// This currently assumes any file matching the glob is a plugin.
+// In the future this may be smarter about checking that a file is
+// executable and so on.
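+//
+// A minimal usage sketch (the glob pattern and directory are illustrative):
+//
+//	paths, err := Discover("example-plugin-*", "./plugins")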
+//
+// TODO: test
+func Discover(glob, dir string) ([]string, error) {
+ var err error
+
+ // Make the directory absolute if it isn't already
+ if !filepath.IsAbs(dir) {
+ dir, err = filepath.Abs(dir)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return filepath.Glob(filepath.Join(dir, glob))
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/error.go b/vendor/github.com/hashicorp/go-plugin/error.go
new file mode 100644
index 00000000..e62a2191
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/error.go
@@ -0,0 +1,27 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+// This is a type that wraps error types so that they can be messaged
+// across RPC channels. Since "error" is an interface, we can't always
+// gob-encode the underlying structure. This is a valid error interface
+// implementer that we will push across.
+type BasicError struct {
+ Message string
+}
+
+// NewBasicError is used to create a BasicError.
+//
+// err is allowed to be nil.
+func NewBasicError(err error) *BasicError {
+ if err == nil {
+ return nil
+ }
+
+ return &BasicError{err.Error()}
+}
+
+func (e *BasicError) Error() string {
+ return e.Message
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_broker.go b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go
new file mode 100644
index 00000000..5b17e37f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go
@@ -0,0 +1,654 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "log"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/hashicorp/go-plugin/internal/grpcmux"
+ "github.com/hashicorp/go-plugin/internal/plugin"
+ "github.com/hashicorp/go-plugin/runner"
+
+ "github.com/oklog/run"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+)
+
+// streamer interface is used in the broker to send/receive connection
+// information.
+type streamer interface {
+ Send(*plugin.ConnInfo) error
+ Recv() (*plugin.ConnInfo, error)
+ Close()
+}
+
+// sendErr is used to pass errors back during a send.
+type sendErr struct {
+ i *plugin.ConnInfo
+ ch chan error
+}
+
+// gRPCBrokerServer is used by the plugin to start a stream and to send
+// connection information to/from the plugin. Implements GRPCBrokerServer and
+// streamer interfaces.
+type gRPCBrokerServer struct {
+ plugin.UnimplementedGRPCBrokerServer
+
+ // send is used to send connection info to the gRPC stream.
+ send chan *sendErr
+
+ // recv is used to receive connection info from the gRPC stream.
+ recv chan *plugin.ConnInfo
+
+ // quit closes down the stream.
+ quit chan struct{}
+
+ // o is used to ensure we close the quit channel only once.
+ o sync.Once
+}
+
+func newGRPCBrokerServer() *gRPCBrokerServer {
+ return &gRPCBrokerServer{
+ send: make(chan *sendErr),
+ recv: make(chan *plugin.ConnInfo),
+ quit: make(chan struct{}),
+ }
+}
+
+// StartStream implements the GRPCBrokerServer interface and will block until
+// the quit channel is closed or the context reports Done. The stream will pass
+// connection information to/from the client.
+func (s *gRPCBrokerServer) StartStream(stream plugin.GRPCBroker_StartStreamServer) error {
+ doneCh := stream.Context().Done()
+ defer s.Close()
+
+ // Process send stream
+ go func() {
+ for {
+ select {
+ case <-doneCh:
+ return
+ case <-s.quit:
+ return
+ case se := <-s.send:
+ err := stream.Send(se.i)
+ se.ch <- err
+ }
+ }
+ }()
+
+ // Process receive stream
+ for {
+ i, err := stream.Recv()
+ if err != nil {
+ return err
+ }
+ select {
+ case <-doneCh:
+ return nil
+ case <-s.quit:
+ return nil
+ case s.recv <- i:
+ }
+ }
+
+ return nil
+}
+
+// Send is used by the GRPCBroker to pass connection information into the stream
+// to the client.
+func (s *gRPCBrokerServer) Send(i *plugin.ConnInfo) error {
+ ch := make(chan error)
+ defer close(ch)
+
+ select {
+ case <-s.quit:
+ return errors.New("broker closed")
+ case s.send <- &sendErr{
+ i: i,
+ ch: ch,
+ }:
+ }
+
+ return <-ch
+}
+
+// Recv is used by the GRPCBroker to pass connection information that has been
+// sent from the client from the stream to the broker.
+func (s *gRPCBrokerServer) Recv() (*plugin.ConnInfo, error) {
+ select {
+ case <-s.quit:
+ return nil, errors.New("broker closed")
+ case i := <-s.recv:
+ return i, nil
+ }
+}
+
+// Close closes the quit channel, shutting down the stream.
+func (s *gRPCBrokerServer) Close() {
+ s.o.Do(func() {
+ close(s.quit)
+ })
+}
+
+// gRPCBrokerClientImpl is used by the client to start a stream and to send
+// connection information to/from the client. Implements GRPCBrokerClient and
+// streamer interfaces.
+type gRPCBrokerClientImpl struct {
+ // client is the underlying GRPC client used to make calls to the server.
+ client plugin.GRPCBrokerClient
+
+ // send is used to send connection info to the gRPC stream.
+ send chan *sendErr
+
+ // recv is used to receive connection info from the gRPC stream.
+ recv chan *plugin.ConnInfo
+
+ // quit closes down the stream.
+ quit chan struct{}
+
+ // o is used to ensure we close the quit channel only once.
+ o sync.Once
+}
+
+func newGRPCBrokerClient(conn *grpc.ClientConn) *gRPCBrokerClientImpl {
+ return &gRPCBrokerClientImpl{
+ client: plugin.NewGRPCBrokerClient(conn),
+ send: make(chan *sendErr),
+ recv: make(chan *plugin.ConnInfo),
+ quit: make(chan struct{}),
+ }
+}
+
+// StartStream implements the GRPCBrokerClient interface and will block until
+// the quit channel is closed or the context reports Done. The stream will pass
+// connection information to/from the plugin.
+func (s *gRPCBrokerClientImpl) StartStream() error {
+ ctx, cancelFunc := context.WithCancel(context.Background())
+ defer cancelFunc()
+ defer s.Close()
+
+ stream, err := s.client.StartStream(ctx)
+ if err != nil {
+ return err
+ }
+ doneCh := stream.Context().Done()
+
+ go func() {
+ for {
+ select {
+ case <-doneCh:
+ return
+ case <-s.quit:
+ return
+ case se := <-s.send:
+ err := stream.Send(se.i)
+ se.ch <- err
+ }
+ }
+ }()
+
+ for {
+ i, err := stream.Recv()
+ if err != nil {
+ return err
+ }
+ select {
+ case <-doneCh:
+ return nil
+ case <-s.quit:
+ return nil
+ case s.recv <- i:
+ }
+ }
+
+ return nil
+}
+
+// Send is used by the GRPCBroker to pass connection information into the stream
+// to the plugin.
+func (s *gRPCBrokerClientImpl) Send(i *plugin.ConnInfo) error {
+ ch := make(chan error)
+ defer close(ch)
+
+ select {
+ case <-s.quit:
+ return errors.New("broker closed")
+ case s.send <- &sendErr{
+ i: i,
+ ch: ch,
+ }:
+ }
+
+ return <-ch
+}
+
+// Recv is used by the GRPCBroker to pass connection information that has been
+// sent from the plugin to the broker.
+func (s *gRPCBrokerClientImpl) Recv() (*plugin.ConnInfo, error) {
+ select {
+ case <-s.quit:
+ return nil, errors.New("broker closed")
+ case i := <-s.recv:
+ return i, nil
+ }
+}
+
+// Close closes the quit channel, shutting down the stream.
+func (s *gRPCBrokerClientImpl) Close() {
+ s.o.Do(func() {
+ close(s.quit)
+ })
+}
+
+// GRPCBroker is responsible for brokering connections by unique ID.
+//
+// It is used by plugins to create multiple gRPC connections and data
+// streams between the plugin process and the host process.
+//
+// This allows a plugin to request a channel with a specific ID to connect to
+// or accept a connection from, and the broker handles the details of
+// holding these channels open while they're being negotiated.
+//
+// The Plugin interface has access to these for both Server and Client.
+// The broker can be used by either (optionally) to reserve and connect to
+// new streams. This is useful for complex args and return values,
+// or anything else you might need a data stream for.
+type GRPCBroker struct {
+ nextId uint32
+ streamer streamer
+ tls *tls.Config
+ doneCh chan struct{}
+ o sync.Once
+
+ clientStreams map[uint32]*gRPCBrokerPending
+ serverStreams map[uint32]*gRPCBrokerPending
+
+ unixSocketCfg UnixSocketConfig
+ addrTranslator runner.AddrTranslator
+
+ dialMutex sync.Mutex
+
+ muxer grpcmux.GRPCMuxer
+
+ sync.Mutex
+}
+
+type gRPCBrokerPending struct {
+ ch chan *plugin.ConnInfo
+ doneCh chan struct{}
+ once sync.Once
+}
+
+func newGRPCBroker(s streamer, tls *tls.Config, unixSocketCfg UnixSocketConfig, addrTranslator runner.AddrTranslator, muxer grpcmux.GRPCMuxer) *GRPCBroker {
+ return &GRPCBroker{
+ streamer: s,
+ tls: tls,
+ doneCh: make(chan struct{}),
+
+ clientStreams: make(map[uint32]*gRPCBrokerPending),
+ serverStreams: make(map[uint32]*gRPCBrokerPending),
+ muxer: muxer,
+
+ unixSocketCfg: unixSocketCfg,
+ addrTranslator: addrTranslator,
+ }
+}
+
+// Accept accepts a connection by ID.
+//
+// This should not be called multiple times with the same ID at one time.
+func (b *GRPCBroker) Accept(id uint32) (net.Listener, error) {
+ if b.muxer.Enabled() {
+ p := b.getServerStream(id)
+ go func() {
+ err := b.listenForKnocks(id)
+ if err != nil {
+ log.Printf("[ERR]: error listening for knocks, id: %d, error: %s", id, err)
+ }
+ }()
+
+ ln, err := b.muxer.Listener(id, p.doneCh)
+ if err != nil {
+ return nil, err
+ }
+
+ ln = &rmListener{
+ Listener: ln,
+ close: func() error {
+ // We could have multiple listeners on the same ID, so use sync.Once
+ // for closing doneCh to ensure we don't get a panic.
+ p.once.Do(func() {
+ close(p.doneCh)
+ })
+
+ b.Lock()
+ defer b.Unlock()
+
+ // No longer need to listen for knocks once the listener is closed.
+ delete(b.serverStreams, id)
+
+ return nil
+ },
+ }
+
+ return ln, nil
+ }
+
+ listener, err := serverListener(b.unixSocketCfg)
+ if err != nil {
+ return nil, err
+ }
+
+ advertiseNet := listener.Addr().Network()
+ advertiseAddr := listener.Addr().String()
+ if b.addrTranslator != nil {
+ advertiseNet, advertiseAddr, err = b.addrTranslator.HostToPlugin(advertiseNet, advertiseAddr)
+ if err != nil {
+ return nil, err
+ }
+ }
+ err = b.streamer.Send(&plugin.ConnInfo{
+ ServiceId: id,
+ Network: advertiseNet,
+ Address: advertiseAddr,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return listener, nil
+}
+
+// AcceptAndServe is used to accept a specific stream ID and immediately
+// serve a gRPC server on that stream ID. This is used to easily serve
+// complex arguments. Each AcceptAndServe call opens a new listener socket and
+// sends the connection info down the stream to the dialer. Since a new
+// connection is opened every call, these calls should be used sparingly.
+// Multiple gRPC server implementations can be registered to a single
+// AcceptAndServe call.
+func (b *GRPCBroker) AcceptAndServe(id uint32, newGRPCServer func([]grpc.ServerOption) *grpc.Server) {
+ ln, err := b.Accept(id)
+ if err != nil {
+ log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err)
+ return
+ }
+ defer ln.Close()
+
+ var opts []grpc.ServerOption
+ if b.tls != nil {
+ opts = []grpc.ServerOption{grpc.Creds(credentials.NewTLS(b.tls))}
+ }
+
+ server := newGRPCServer(opts)
+
+ // Here we use a run group to close this goroutine if the server is shutdown
+ // or the broker is shutdown.
+ var g run.Group
+ {
+ // Serve on the listener, if shutting down call GracefulStop.
+ g.Add(func() error {
+ return server.Serve(ln)
+ }, func(err error) {
+ server.GracefulStop()
+ })
+ }
+ {
+ // block on the closeCh or the doneCh. If we are shutting down close the
+ // closeCh.
+ closeCh := make(chan struct{})
+ g.Add(func() error {
+ select {
+ case <-b.doneCh:
+ case <-closeCh:
+ }
+ return nil
+ }, func(err error) {
+ close(closeCh)
+ })
+ }
+
+ // Block until we are done
+ g.Run()
+}
+
+// Close closes the stream and all servers.
+func (b *GRPCBroker) Close() error {
+ b.streamer.Close()
+ b.o.Do(func() {
+ close(b.doneCh)
+ })
+ return nil
+}
+
+func (b *GRPCBroker) listenForKnocks(id uint32) error {
+ p := b.getServerStream(id)
+ for {
+ select {
+ case msg := <-p.ch:
+ // Shouldn't be possible.
+ if msg.ServiceId != id {
+ return fmt.Errorf("knock received with wrong service ID; expected %d but got %d", id, msg.ServiceId)
+ }
+
+ // Also shouldn't be possible.
+ if msg.Knock == nil || !msg.Knock.Knock || msg.Knock.Ack {
+ return fmt.Errorf("knock received for service ID %d with incorrect values; knock=%+v", id, msg.Knock)
+ }
+
+ // Successful knock, open the door for the given ID.
+ var ackError string
+ err := b.muxer.AcceptKnock(id)
+ if err != nil {
+ ackError = err.Error()
+ }
+
+ // Send back an acknowledgement to allow the client to start dialling.
+ err = b.streamer.Send(&plugin.ConnInfo{
+ ServiceId: id,
+ Knock: &plugin.ConnInfo_Knock{
+ Knock: true,
+ Ack: true,
+ Error: ackError,
+ },
+ })
+ if err != nil {
+ return fmt.Errorf("error sending back knock acknowledgement: %w", err)
+ }
+ case <-p.doneCh:
+ return nil
+ }
+ }
+}
+
+func (b *GRPCBroker) knock(id uint32) error {
+ // Send a knock.
+ err := b.streamer.Send(&plugin.ConnInfo{
+ ServiceId: id,
+ Knock: &plugin.ConnInfo_Knock{
+ Knock: true,
+ },
+ })
+ if err != nil {
+ return err
+ }
+
+ // Wait for the ack.
+ p := b.getClientStream(id)
+ select {
+ case msg := <-p.ch:
+ if msg.ServiceId != id {
+ return fmt.Errorf("handshake failed for multiplexing on id %d; got response for %d", id, msg.ServiceId)
+ }
+ if msg.Knock == nil || !msg.Knock.Knock || !msg.Knock.Ack {
+ return fmt.Errorf("handshake failed for multiplexing on id %d; expected knock and ack, but got %+v", id, msg.Knock)
+ }
+ if msg.Knock.Error != "" {
+ return fmt.Errorf("failed to knock for id %d: %s", id, msg.Knock.Error)
+ }
+ case <-time.After(5 * time.Second):
+ return fmt.Errorf("timeout waiting for multiplexing knock handshake on id %d", id)
+ }
+
+ return nil
+}
+
+func (b *GRPCBroker) muxDial(id uint32) func(string, time.Duration) (net.Conn, error) {
+ return func(string, time.Duration) (net.Conn, error) {
+ b.dialMutex.Lock()
+ defer b.dialMutex.Unlock()
+
+ // Tell the other side the listener ID it should give the next stream to.
+ err := b.knock(id)
+ if err != nil {
+ return nil, fmt.Errorf("failed to knock before dialling client: %w", err)
+ }
+
+ conn, err := b.muxer.Dial()
+ if err != nil {
+ return nil, err
+ }
+
+ return conn, nil
+ }
+}
+
+// Dial opens a connection by ID.
+func (b *GRPCBroker) Dial(id uint32) (conn *grpc.ClientConn, err error) {
+ if b.muxer.Enabled() {
+ return dialGRPCConn(b.tls, b.muxDial(id))
+ }
+
+ var c *plugin.ConnInfo
+
+ // Open the stream
+ p := b.getClientStream(id)
+ select {
+ case c = <-p.ch:
+ close(p.doneCh)
+ case <-time.After(5 * time.Second):
+ return nil, fmt.Errorf("timeout waiting for connection info")
+ }
+
+ network, address := c.Network, c.Address
+ if b.addrTranslator != nil {
+ network, address, err = b.addrTranslator.PluginToHost(network, address)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var addr net.Addr
+ switch network {
+ case "tcp":
+ addr, err = net.ResolveTCPAddr("tcp", address)
+ case "unix":
+ addr, err = net.ResolveUnixAddr("unix", address)
+ default:
+ err = fmt.Errorf("Unknown address type: %s", c.Address)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return dialGRPCConn(b.tls, netAddrDialer(addr))
+}
+
+// NextId returns a unique ID to use next.
+//
+// It is possible for very long-running plugin hosts to wrap this value,
+// though it would require a very large number of calls. In practice
+// we've never seen it happen.
+func (m *GRPCBroker) NextId() uint32 {
+ return atomic.AddUint32(&m.nextId, 1)
+}
+
+// Run starts the brokering and should be executed in a goroutine, since it
+// blocks forever, or until the session closes.
+//
+// Uses of GRPCBroker never need to call this. It is called internally by
+// the plugin host/client.
+func (m *GRPCBroker) Run() {
+ for {
+ msg, err := m.streamer.Recv()
+ if err != nil {
+ // Once we receive an error, just exit
+ break
+ }
+
+ // Initialize the waiter
+ var p *gRPCBrokerPending
+ if msg.Knock != nil && msg.Knock.Knock && !msg.Knock.Ack {
+ p = m.getServerStream(msg.ServiceId)
+ // The server side doesn't close the channel immediately as it needs
+ // to continuously listen for knocks.
+ } else {
+ p = m.getClientStream(msg.ServiceId)
+ go m.timeoutWait(msg.ServiceId, p)
+ }
+ select {
+ case p.ch <- msg:
+ default:
+ }
+ }
+}
+
+// getClientStream returns a buffer that receives new connection info and
+// knock acks for the given stream ID.
+func (m *GRPCBroker) getClientStream(id uint32) *gRPCBrokerPending {
+ m.Lock()
+ defer m.Unlock()
+
+ p, ok := m.clientStreams[id]
+ if ok {
+ return p
+ }
+
+ m.clientStreams[id] = &gRPCBrokerPending{
+ ch: make(chan *plugin.ConnInfo, 1),
+ doneCh: make(chan struct{}),
+ }
+ return m.clientStreams[id]
+}
+
+// getServerStream returns a buffer that receives knocks for a multiplexed
+// stream ID this side is listening on. Not used unless multiplexing is enabled.
+func (m *GRPCBroker) getServerStream(id uint32) *gRPCBrokerPending {
+ m.Lock()
+ defer m.Unlock()
+
+ p, ok := m.serverStreams[id]
+ if ok {
+ return p
+ }
+
+ m.serverStreams[id] = &gRPCBrokerPending{
+ ch: make(chan *plugin.ConnInfo, 1),
+ doneCh: make(chan struct{}),
+ }
+ return m.serverStreams[id]
+}
+
+func (m *GRPCBroker) timeoutWait(id uint32, p *gRPCBrokerPending) {
+ // Wait for the stream to either be picked up and connected, or
+ // for a timeout.
+ select {
+ case <-p.doneCh:
+ case <-time.After(5 * time.Second):
+ }
+
+ m.Lock()
+ defer m.Unlock()
+
+ // Delete the stream so no one else can grab it
+ delete(m.clientStreams, id)
+}
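The GRPCBroker in this file is the piece plugin authors interact with directly: one process reserves a stream ID with NextId, serves an extra gRPC service on it via AcceptAndServe, and passes the ID across in an ordinary RPC so the peer can Dial it. A minimal sketch of that pattern follows; counterpb, its Counter service, and the helper function names are hypothetical stand-ins, not part of this diff.

import (
	"google.golang.org/grpc"

	plugin "github.com/hashicorp/go-plugin"
)

// serveExtraService reserves a broker ID and serves a hypothetical Counter
// service on it. The returned ID is meant to be sent to the peer inside a
// normal RPC message.
func serveExtraService(broker *plugin.GRPCBroker, impl counterpb.CounterServer) uint32 {
	id := broker.NextId()

	// AcceptAndServe opens a dedicated listener (TCP, unix socket, or a
	// multiplexed stream) for this ID and serves the returned *grpc.Server on it.
	go broker.AcceptAndServe(id, func(opts []grpc.ServerOption) *grpc.Server {
		s := grpc.NewServer(opts...)
		counterpb.RegisterCounterServer(s, impl)
		return s
	})

	return id
}

// dialExtraService runs on the peer once it receives the ID above.
func dialExtraService(broker *plugin.GRPCBroker, id uint32) (counterpb.CounterClient, error) {
	conn, err := broker.Dial(id) // knocks first when multiplexing is enabled
	if err != nil {
		return nil, err
	}
	return counterpb.NewCounterClient(conn), nil
}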
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go
new file mode 100644
index 00000000..627649d8
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/grpc_client.go
@@ -0,0 +1,134 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "math"
+ "net"
+ "time"
+
+ "github.com/hashicorp/go-plugin/internal/plugin"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/health/grpc_health_v1"
+)
+
+func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, error), dialOpts ...grpc.DialOption) (*grpc.ClientConn, error) {
+ // Build dialing options.
+ opts := make([]grpc.DialOption, 0)
+
+ // We use a custom dialer so that we can connect over unix domain sockets.
+ opts = append(opts, grpc.WithDialer(dialer))
+
+ // Fail right away
+ opts = append(opts, grpc.FailOnNonTempDialError(true))
+
+ // If we have no TLS configuration set, we need to explicitly tell grpc
+ // that we're connecting with an insecure connection.
+ if tls == nil {
+ opts = append(opts, grpc.WithInsecure())
+ } else {
+ opts = append(opts, grpc.WithTransportCredentials(
+ credentials.NewTLS(tls)))
+ }
+
+ opts = append(opts,
+ grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32)),
+ grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(math.MaxInt32)))
+
+ // Add our custom options if we have any
+ opts = append(opts, dialOpts...)
+
+ // Connect. Note the first parameter is unused because we use a custom
+ // dialer that has the state to see the address.
+ conn, err := grpc.Dial("unused", opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return conn, nil
+}
+
+// newGRPCClient creates a new GRPCClient. The Client argument is expected
+// to be successfully started already with a lock held.
+func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) {
+ conn, err := dialGRPCConn(c.config.TLSConfig, c.dialer, c.config.GRPCDialOptions...)
+ if err != nil {
+ return nil, err
+ }
+
+ muxer, err := c.getGRPCMuxer(c.address)
+ if err != nil {
+ return nil, err
+ }
+
+ // Start the broker.
+ brokerGRPCClient := newGRPCBrokerClient(conn)
+ broker := newGRPCBroker(brokerGRPCClient, c.config.TLSConfig, c.unixSocketCfg, c.runner, muxer)
+ go broker.Run()
+ go brokerGRPCClient.StartStream()
+
+ // Start the stdio client
+ stdioClient, err := newGRPCStdioClient(doneCtx, c.logger.Named("stdio"), conn)
+ if err != nil {
+ return nil, err
+ }
+ go stdioClient.Run(c.config.SyncStdout, c.config.SyncStderr)
+
+ cl := &GRPCClient{
+ Conn: conn,
+ Plugins: c.config.Plugins,
+ doneCtx: doneCtx,
+ broker: broker,
+ controller: plugin.NewGRPCControllerClient(conn),
+ }
+
+ return cl, nil
+}
+
+// GRPCClient connects to a GRPCServer over gRPC to dispense plugin types.
+type GRPCClient struct {
+ Conn *grpc.ClientConn
+ Plugins map[string]Plugin
+
+ doneCtx context.Context
+ broker *GRPCBroker
+
+ controller plugin.GRPCControllerClient
+}
+
+// ClientProtocol impl.
+func (c *GRPCClient) Close() error {
+ c.broker.Close()
+ c.controller.Shutdown(c.doneCtx, &plugin.Empty{})
+ return c.Conn.Close()
+}
+
+// ClientProtocol impl.
+func (c *GRPCClient) Dispense(name string) (interface{}, error) {
+ raw, ok := c.Plugins[name]
+ if !ok {
+ return nil, fmt.Errorf("unknown plugin type: %s", name)
+ }
+
+ p, ok := raw.(GRPCPlugin)
+ if !ok {
+ return nil, fmt.Errorf("plugin %q doesn't support gRPC", name)
+ }
+
+ return p.GRPCClient(c.doneCtx, c.broker, c.Conn)
+}
+
+// ClientProtocol impl.
+func (c *GRPCClient) Ping() error {
+ client := grpc_health_v1.NewHealthClient(c.Conn)
+ _, err := client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{
+ Service: GRPCServiceName,
+ })
+
+ return err
+}
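GRPCClient.Dispense above is what ultimately backs the public Dispense call on the host side. A hedged sketch of how a host typically reaches it; the "kv" plugin name, KV interface, KVGRPCPlugin implementation, binary path, and handshake value are hypothetical examples:

import (
	"os/exec"

	plugin "github.com/hashicorp/go-plugin"
)

func dispenseKV(handshake plugin.HandshakeConfig) (KV, error) {
	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig:  handshake,
		Plugins:          map[string]plugin.Plugin{"kv": &KVGRPCPlugin{}},
		Cmd:              exec.Command("./kv-plugin"),
		AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},
	})

	// Client() launches the plugin, performs the handshake, and returns a
	// ClientProtocol; when gRPC is negotiated that is the GRPCClient above.
	rpcClient, err := client.Client()
	if err != nil {
		client.Kill()
		return nil, err
	}

	// Dispense routes to GRPCClient.Dispense, which calls the registered
	// GRPCPlugin's GRPCClient method for the named plugin.
	raw, err := rpcClient.Dispense("kv")
	if err != nil {
		client.Kill()
		return nil, err
	}
	return raw.(KV), nil
}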
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_controller.go b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go
new file mode 100644
index 00000000..2085356c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go
@@ -0,0 +1,26 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+ "context"
+
+ "github.com/hashicorp/go-plugin/internal/plugin"
+)
+
+// GRPCControllerServer handles shutdown calls to terminate the server when the
+// plugin client is closed.
+type grpcControllerServer struct {
+ server *GRPCServer
+}
+
+// Shutdown stops the grpc server. It first will attempt a graceful stop, then a
+// full stop on the server.
+func (s *grpcControllerServer) Shutdown(ctx context.Context, _ *plugin.Empty) (*plugin.Empty, error) {
+ resp := &plugin.Empty{}
+
+ // TODO: figure out why GracefulStop doesn't work.
+ s.server.Stop()
+ return resp, nil
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/vendor/github.com/hashicorp/go-plugin/grpc_server.go
new file mode 100644
index 00000000..a5f40c7f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/grpc_server.go
@@ -0,0 +1,167 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+ "bytes"
+ "crypto/tls"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net"
+
+ hclog "github.com/hashicorp/go-hclog"
+ "github.com/hashicorp/go-plugin/internal/grpcmux"
+ "github.com/hashicorp/go-plugin/internal/plugin"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/health"
+ "google.golang.org/grpc/health/grpc_health_v1"
+ "google.golang.org/grpc/reflection"
+)
+
+// GRPCServiceName is the name of the service that the health check should
+// return as passing.
+const GRPCServiceName = "plugin"
+
+// DefaultGRPCServer can be used with the "GRPCServer" field for Server
+// as a default factory method to create a gRPC server with no extra options.
+func DefaultGRPCServer(opts []grpc.ServerOption) *grpc.Server {
+ return grpc.NewServer(opts...)
+}
+
+// GRPCServer is a ServerType implementation that serves plugins over
+// gRPC. This allows plugins to easily be written for other languages.
+//
+// The GRPCServer outputs a custom configuration as a base64-encoded
+// JSON structure represented by the GRPCServerConfig config structure.
+type GRPCServer struct {
+ // Plugins are the list of plugins to serve.
+ Plugins map[string]Plugin
+
+ // Server is the actual server that will accept connections. This
+ // will be used for plugin registration as well.
+ Server func([]grpc.ServerOption) *grpc.Server
+
+ // TLS should be the TLS configuration if available. If this is nil,
+ // the connection will not have transport security.
+ TLS *tls.Config
+
+ // DoneCh is the channel that is closed when this server has exited.
+ DoneCh chan struct{}
+
+ // Stdout/StderrLis are the readers for stdout/stderr that will be copied
+ // to the stdout/stderr connection that is output.
+ Stdout io.Reader
+ Stderr io.Reader
+
+ config GRPCServerConfig
+ server *grpc.Server
+ broker *GRPCBroker
+ stdioServer *grpcStdioServer
+
+ logger hclog.Logger
+
+ muxer *grpcmux.GRPCServerMuxer
+}
+
+// ServerProtocol impl.
+func (s *GRPCServer) Init() error {
+ // Create our server
+ var opts []grpc.ServerOption
+ if s.TLS != nil {
+ opts = append(opts, grpc.Creds(credentials.NewTLS(s.TLS)))
+ }
+ s.server = s.Server(opts)
+
+ // Register the health service
+ healthCheck := health.NewServer()
+ healthCheck.SetServingStatus(
+ GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING)
+ grpc_health_v1.RegisterHealthServer(s.server, healthCheck)
+
+ // Register the reflection service
+ reflection.Register(s.server)
+
+ // Register the broker service
+ brokerServer := newGRPCBrokerServer()
+ plugin.RegisterGRPCBrokerServer(s.server, brokerServer)
+ s.broker = newGRPCBroker(brokerServer, s.TLS, unixSocketConfigFromEnv(), nil, s.muxer)
+ go s.broker.Run()
+
+ // Register the controller
+ controllerServer := &grpcControllerServer{server: s}
+ plugin.RegisterGRPCControllerServer(s.server, controllerServer)
+
+ // Register the stdio service
+ s.stdioServer = newGRPCStdioServer(s.logger, s.Stdout, s.Stderr)
+ plugin.RegisterGRPCStdioServer(s.server, s.stdioServer)
+
+ // Register all our plugins onto the gRPC server.
+ for k, raw := range s.Plugins {
+ p, ok := raw.(GRPCPlugin)
+ if !ok {
+ return fmt.Errorf("%q is not a GRPC-compatible plugin", k)
+ }
+
+ if err := p.GRPCServer(s.broker, s.server); err != nil {
+ return fmt.Errorf("error registering %q: %s", k, err)
+ }
+ }
+
+ return nil
+}
+
+// Stop calls Stop on the underlying grpc.Server and Close on the underlying
+// grpc.Broker if present.
+func (s *GRPCServer) Stop() {
+ s.server.Stop()
+
+ if s.broker != nil {
+ s.broker.Close()
+ s.broker = nil
+ }
+}
+
+// GracefulStop calls GracefulStop on the underlying grpc.Server and Close on
+// the underlying grpc.Broker if present.
+func (s *GRPCServer) GracefulStop() {
+ s.server.GracefulStop()
+
+ if s.broker != nil {
+ s.broker.Close()
+ s.broker = nil
+ }
+}
+
+// Config is the GRPCServerConfig encoded as JSON then base64.
+func (s *GRPCServer) Config() string {
+ // Create a buffer that will contain our final contents
+ var buf bytes.Buffer
+
+ // Wrap the base64 encoding with JSON encoding.
+ if err := json.NewEncoder(&buf).Encode(s.config); err != nil {
+ // We panic since this shouldn't happen under any scenario. We
+ // carefully control the structure being encoded here and it should
+ // always be successful.
+ panic(err)
+ }
+
+ return buf.String()
+}
+
+func (s *GRPCServer) Serve(lis net.Listener) {
+ defer close(s.DoneCh)
+ err := s.server.Serve(lis)
+ if err != nil {
+ s.logger.Error("grpc server", "error", err)
+ }
+}
+
+// GRPCServerConfig is the extra configuration passed along for consumers
+// to facilitate using GRPC plugins.
+type GRPCServerConfig struct {
+ StdoutAddr string `json:"stdout_addr"`
+ StderrAddr string `json:"stderr_addr"`
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go
new file mode 100644
index 00000000..ae06c116
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go
@@ -0,0 +1,210 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "io"
+
+ empty "github.com/golang/protobuf/ptypes/empty"
+ hclog "github.com/hashicorp/go-hclog"
+ "github.com/hashicorp/go-plugin/internal/plugin"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+// grpcStdioBuffer is the buffer size we try to fill when sending a chunk of
+// stdio data. This is currently 1 KB for no reason other than that seems like
+// enough (stdio data isn't that common) and is fairly low.
+const grpcStdioBuffer = 1 * 1024
+
+// grpcStdioServer implements the Stdio service and streams stdout/stderr.
+type grpcStdioServer struct {
+ stdoutCh <-chan []byte
+ stderrCh <-chan []byte
+}
+
+// newGRPCStdioServer creates a new grpcStdioServer and starts the stream
+// copying for the given out and err readers.
+//
+// This must only be called ONCE per srcOut, srcErr.
+func newGRPCStdioServer(log hclog.Logger, srcOut, srcErr io.Reader) *grpcStdioServer {
+ stdoutCh := make(chan []byte)
+ stderrCh := make(chan []byte)
+
+ // Begin copying the streams
+ go copyChan(log, stdoutCh, srcOut)
+ go copyChan(log, stderrCh, srcErr)
+
+ // Construct our server
+ return &grpcStdioServer{
+ stdoutCh: stdoutCh,
+ stderrCh: stderrCh,
+ }
+}
+
+// StreamStdio streams our stdout/err as the response.
+func (s *grpcStdioServer) StreamStdio(
+ _ *empty.Empty,
+ srv plugin.GRPCStdio_StreamStdioServer,
+) error {
+ // Share the same data value between runs. Sending this over the wire
+ // marshals it so we can reuse this.
+ var data plugin.StdioData
+
+ for {
+ // Read our data
+ select {
+ case data.Data = <-s.stdoutCh:
+ data.Channel = plugin.StdioData_STDOUT
+
+ case data.Data = <-s.stderrCh:
+ data.Channel = plugin.StdioData_STDERR
+
+ case <-srv.Context().Done():
+ return nil
+ }
+
+ // Not sure if this is possible, but if we somehow got here and
+ // we didn't populate any data at all, then just continue.
+ if len(data.Data) == 0 {
+ continue
+ }
+
+ // Send our data to the client.
+ if err := srv.Send(&data); err != nil {
+ return err
+ }
+ }
+}
+
+// grpcStdioClient wraps the stdio service as a client to copy
+// the stdio data to output writers.
+type grpcStdioClient struct {
+ log hclog.Logger
+ stdioClient plugin.GRPCStdio_StreamStdioClient
+}
+
+// newGRPCStdioClient creates a grpcStdioClient. This will perform the
+// initial connection to the stdio service. If the stdio service is unavailable
+// then this will be a no-op. This allows the client to work without error
+// against plugins that don't support the stdio service.
+func newGRPCStdioClient(
+ ctx context.Context,
+ log hclog.Logger,
+ conn *grpc.ClientConn,
+) (*grpcStdioClient, error) {
+ client := plugin.NewGRPCStdioClient(conn)
+
+ // Connect immediately to the endpoint
+ stdioClient, err := client.StreamStdio(ctx, &empty.Empty{})
+
+ // If we get an Unavailable or Unimplemented error, the plugin isn't
+ // built against a version of go-plugin that supports the stdio service,
+ // so we fall back to the previous behavior of just not syncing anything.
+ if status.Code(err) == codes.Unavailable || status.Code(err) == codes.Unimplemented {
+ log.Warn("stdio service not available, stdout/stderr syncing unavailable")
+ stdioClient = nil
+ err = nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return &grpcStdioClient{
+ log: log,
+ stdioClient: stdioClient,
+ }, nil
+}
+
+// Run starts the loop that receives stdio data and writes it to the given
+// writers. This blocks and should be run in a goroutine.
+func (c *grpcStdioClient) Run(stdout, stderr io.Writer) {
+ // This will be nil if stdio is not supported by the plugin
+ if c.stdioClient == nil {
+ c.log.Warn("stdio service unavailable, run will do nothing")
+ return
+ }
+
+ for {
+ c.log.Trace("waiting for stdio data")
+ data, err := c.stdioClient.Recv()
+ if err != nil {
+ if err == io.EOF ||
+ status.Code(err) == codes.Unavailable ||
+ status.Code(err) == codes.Canceled ||
+ status.Code(err) == codes.Unimplemented ||
+ err == context.Canceled {
+ c.log.Debug("received EOF, stopping recv loop", "err", err)
+ return
+ }
+
+ c.log.Error("error receiving data", "err", err)
+ return
+ }
+
+ // Determine our output writer based on channel
+ var w io.Writer
+ switch data.Channel {
+ case plugin.StdioData_STDOUT:
+ w = stdout
+
+ case plugin.StdioData_STDERR:
+ w = stderr
+
+ default:
+ c.log.Warn("unknown channel, dropping", "channel", data.Channel)
+ continue
+ }
+
+ // Write! In the event of an error we just continue.
+ if c.log.IsTrace() {
+ c.log.Trace("received data", "channel", data.Channel.String(), "len", len(data.Data))
+ }
+ if _, err := io.Copy(w, bytes.NewReader(data.Data)); err != nil {
+ c.log.Error("failed to copy all bytes", "err", err)
+ }
+ }
+}
+
+// copyChan copies an io.Reader into a channel.
+func copyChan(log hclog.Logger, dst chan<- []byte, src io.Reader) {
+ bufsrc := bufio.NewReader(src)
+
+ for {
+ // Make our data buffer. We allocate a new one per loop iteration
+ // so that we can send it over the channel.
+ var data [1024]byte
+
+ // Read the data, this will block until data is available
+ n, err := bufsrc.Read(data[:])
+
+ // We have to check if we have data BEFORE err != nil. The bufio
+ // docs guarantee n == 0 on EOF but it's better to be safe here.
+ if n > 0 {
+ // We have data! Send it on the channel. This will block if there
+ // is no reader on the other side. We expect that go-plugin will
+ // connect immediately to the stdio server to drain this so we want
+ // this block to happen for backpressure.
+ dst <- data[:n]
+ }
+
+ // If we hit EOF we're done copying
+ if err == io.EOF {
+ log.Debug("stdio EOF, exiting copy loop")
+ return
+ }
+
+ // On any other error we just exit the loop. We don't expect there to
+ // be errors since our use case for this is reading/writing from
+ // an in-process pipe (os.Pipe).
+ if err != nil {
+ log.Warn("error copying stdio data, stopping copy", "err", err)
+ return
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/addr_translator.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/addr_translator.go
new file mode 100644
index 00000000..1854d2dd
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/addr_translator.go
@@ -0,0 +1,16 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package cmdrunner
+
+// addrTranslator implements stateless identity functions, as the host and plugin
+// run in the same context wrt Unix and network addresses.
+type addrTranslator struct{}
+
+func (*addrTranslator) PluginToHost(pluginNet, pluginAddr string) (string, string, error) {
+ return pluginNet, pluginAddr, nil
+}
+
+func (*addrTranslator) HostToPlugin(hostNet, hostAddr string) (string, string, error) {
+ return hostNet, hostAddr, nil
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_reattach.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_reattach.go
new file mode 100644
index 00000000..dce1a86a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_reattach.go
@@ -0,0 +1,63 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package cmdrunner
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "os"
+
+ "github.com/hashicorp/go-plugin/runner"
+)
+
+// ReattachFunc returns a function that allows reattaching to a plugin running
+// as a plain process. The process may or may not be a child process.
+func ReattachFunc(pid int, addr net.Addr) runner.ReattachFunc {
+ return func() (runner.AttachedRunner, error) {
+ p, err := os.FindProcess(pid)
+ if err != nil {
+ // On Unix systems, FindProcess never returns an error.
+ // On Windows, for non-existent pids it returns:
+ // os.SyscallError - 'OpenProcess: the parameter is incorrect'
+ return nil, ErrProcessNotFound
+ }
+
+ // Attempt to connect to the addr since on Unix systems FindProcess
+ // doesn't actually return an error if it can't find the process.
+ conn, err := net.Dial(addr.Network(), addr.String())
+ if err != nil {
+ p.Kill()
+ return nil, ErrProcessNotFound
+ }
+ conn.Close()
+
+ return &CmdAttachedRunner{
+ pid: pid,
+ process: p,
+ }, nil
+ }
+}
+
+// CmdAttachedRunner is mostly a subset of CmdRunner, except the Wait function
+// does not assume the process is a child of the host process, and so uses a
+// different implementation to wait on the process.
+type CmdAttachedRunner struct {
+ pid int
+ process *os.Process
+
+ addrTranslator
+}
+
+func (c *CmdAttachedRunner) Wait(_ context.Context) error {
+ return pidWait(c.pid)
+}
+
+func (c *CmdAttachedRunner) Kill(_ context.Context) error {
+ return c.process.Kill()
+}
+
+func (c *CmdAttachedRunner) ID() string {
+ return fmt.Sprintf("%d", c.pid)
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_runner.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_runner.go
new file mode 100644
index 00000000..b26fea92
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_runner.go
@@ -0,0 +1,129 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package cmdrunner
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+
+ "github.com/hashicorp/go-hclog"
+ "github.com/hashicorp/go-plugin/runner"
+)
+
+var (
+ _ runner.Runner = (*CmdRunner)(nil)
+
+ // ErrProcessNotFound is returned when a client is instantiated to
+ // reattach to an existing process and it isn't found.
+ ErrProcessNotFound = errors.New("Reattachment process not found")
+)
+
+const unrecognizedRemotePluginMessage = `This usually means
+ the plugin was not compiled for this architecture,
+ the plugin is missing dynamic-link libraries necessary to run,
+ the plugin is not executable by this process due to file permissions, or
+ the plugin failed to negotiate the initial go-plugin protocol handshake
+%s`
+
+// CmdRunner implements the runner.Runner interface. It mostly just passes through
+// to exec.Cmd methods.
+type CmdRunner struct {
+ logger hclog.Logger
+ cmd *exec.Cmd
+
+ stdout io.ReadCloser
+ stderr io.ReadCloser
+
+ // Cmd info is persisted early, since the process information will be removed
+ // after Kill is called.
+ path string
+ pid int
+
+ addrTranslator
+}
+
+// NewCmdRunner returns an implementation of runner.Runner for running a plugin
+// as a subprocess. It must be passed a cmd that hasn't yet been started.
+func NewCmdRunner(logger hclog.Logger, cmd *exec.Cmd) (*CmdRunner, error) {
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ return nil, err
+ }
+
+ stderr, err := cmd.StderrPipe()
+ if err != nil {
+ return nil, err
+ }
+
+ return &CmdRunner{
+ logger: logger,
+ cmd: cmd,
+ stdout: stdout,
+ stderr: stderr,
+ path: cmd.Path,
+ }, nil
+}
+
+func (c *CmdRunner) Start(_ context.Context) error {
+ c.logger.Debug("starting plugin", "path", c.cmd.Path, "args", c.cmd.Args)
+ err := c.cmd.Start()
+ if err != nil {
+ return err
+ }
+
+ c.pid = c.cmd.Process.Pid
+ c.logger.Debug("plugin started", "path", c.path, "pid", c.pid)
+ return nil
+}
+
+func (c *CmdRunner) Wait(_ context.Context) error {
+ return c.cmd.Wait()
+}
+
+func (c *CmdRunner) Kill(_ context.Context) error {
+ if c.cmd.Process != nil {
+ err := c.cmd.Process.Kill()
+ // Swallow ErrProcessDone, we support calling Kill multiple times.
+ if !errors.Is(err, os.ErrProcessDone) {
+ return err
+ }
+ return nil
+ }
+
+ return nil
+}
+
+func (c *CmdRunner) Stdout() io.ReadCloser {
+ return c.stdout
+}
+
+func (c *CmdRunner) Stderr() io.ReadCloser {
+ return c.stderr
+}
+
+func (c *CmdRunner) Name() string {
+ return c.path
+}
+
+func (c *CmdRunner) ID() string {
+ return fmt.Sprintf("%d", c.pid)
+}
+
+// peTypes is a list of Portable Executable (PE) machine types from https://learn.microsoft.com/en-us/windows/win32/debug/pe-format
+// mapped to GOARCH types. It is not comprehensive, and only includes machine types that Go supports.
+var peTypes = map[uint16]string{
+ 0x14c: "386",
+ 0x1c0: "arm",
+ 0x6264: "loong64",
+ 0x8664: "amd64",
+ 0xaa64: "arm64",
+}
+
+func (c *CmdRunner) Diagnose(_ context.Context) string {
+ return fmt.Sprintf(unrecognizedRemotePluginMessage, additionalNotesAboutCommand(c.cmd.Path))
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_unix.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_unix.go
new file mode 100644
index 00000000..ce04cfeb
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_unix.go
@@ -0,0 +1,70 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+//go:build !windows
+// +build !windows
+
+package cmdrunner
+
+import (
+ "debug/elf"
+ "debug/macho"
+ "debug/pe"
+ "fmt"
+ "os"
+ "os/user"
+ "runtime"
+ "strconv"
+ "syscall"
+)
+
+// additionalNotesAboutCommand tries to get additional information about a command that might help diagnose
+// why it won't run correctly. It runs as a best effort only.
+func additionalNotesAboutCommand(path string) string {
+ notes := ""
+ stat, err := os.Stat(path)
+ if err != nil {
+ return notes
+ }
+
+ notes += "\nAdditional notes about plugin:\n"
+ notes += fmt.Sprintf(" Path: %s\n", path)
+ notes += fmt.Sprintf(" Mode: %s\n", stat.Mode())
+ statT, ok := stat.Sys().(*syscall.Stat_t)
+ if ok {
+ currentUsername := "?"
+ if u, err := user.LookupId(strconv.FormatUint(uint64(os.Getuid()), 10)); err == nil {
+ currentUsername = u.Username
+ }
+ currentGroup := "?"
+ if g, err := user.LookupGroupId(strconv.FormatUint(uint64(os.Getgid()), 10)); err == nil {
+ currentGroup = g.Name
+ }
+ username := "?"
+ if u, err := user.LookupId(strconv.FormatUint(uint64(statT.Uid), 10)); err == nil {
+ username = u.Username
+ }
+ group := "?"
+ if g, err := user.LookupGroupId(strconv.FormatUint(uint64(statT.Gid), 10)); err == nil {
+ group = g.Name
+ }
+ notes += fmt.Sprintf(" Owner: %d [%s] (current: %d [%s])\n", statT.Uid, username, os.Getuid(), currentUsername)
+ notes += fmt.Sprintf(" Group: %d [%s] (current: %d [%s])\n", statT.Gid, group, os.Getgid(), currentGroup)
+ }
+
+ if elfFile, err := elf.Open(path); err == nil {
+ defer elfFile.Close()
+ notes += fmt.Sprintf(" ELF architecture: %s (current architecture: %s)\n", elfFile.Machine, runtime.GOARCH)
+ } else if machoFile, err := macho.Open(path); err == nil {
+ defer machoFile.Close()
+ notes += fmt.Sprintf(" MachO architecture: %s (current architecture: %s)\n", machoFile.Cpu, runtime.GOARCH)
+ } else if peFile, err := pe.Open(path); err == nil {
+ defer peFile.Close()
+ machine, ok := peTypes[peFile.Machine]
+ if !ok {
+ machine = "unknown"
+ }
+ notes += fmt.Sprintf(" PE architecture: %s (current architecture: %s)\n", machine, runtime.GOARCH)
+ }
+ return notes
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_windows.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_windows.go
new file mode 100644
index 00000000..39c51dd1
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_windows.go
@@ -0,0 +1,46 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+//go:build windows
+// +build windows
+
+package cmdrunner
+
+import (
+ "debug/elf"
+ "debug/macho"
+ "debug/pe"
+ "fmt"
+ "os"
+ "runtime"
+)
+
+// additionalNotesAboutCommand tries to get additional information about a command that might help diagnose
+// why it won't run correctly. It runs as a best effort only.
+func additionalNotesAboutCommand(path string) string {
+ notes := ""
+ stat, err := os.Stat(path)
+ if err != nil {
+ return notes
+ }
+
+ notes += "\nAdditional notes about plugin:\n"
+ notes += fmt.Sprintf(" Path: %s\n", path)
+ notes += fmt.Sprintf(" Mode: %s\n", stat.Mode())
+
+ if elfFile, err := elf.Open(path); err == nil {
+ defer elfFile.Close()
+ notes += fmt.Sprintf(" ELF architecture: %s (current architecture: %s)\n", elfFile.Machine, runtime.GOARCH)
+ } else if machoFile, err := macho.Open(path); err == nil {
+ defer machoFile.Close()
+ notes += fmt.Sprintf(" MachO architecture: %s (current architecture: %s)\n", machoFile.Cpu, runtime.GOARCH)
+ } else if peFile, err := pe.Open(path); err == nil {
+ defer peFile.Close()
+ machine, ok := peTypes[peFile.Machine]
+ if !ok {
+ machine = "unknown"
+ }
+ notes += fmt.Sprintf(" PE architecture: %s (current architecture: %s)\n", machine, runtime.GOARCH)
+ }
+ return notes
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process.go
new file mode 100644
index 00000000..6c34dc77
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process.go
@@ -0,0 +1,25 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package cmdrunner
+
+import "time"
+
+// pidAlive checks whether a pid is alive.
+func pidAlive(pid int) bool {
+ return _pidAlive(pid)
+}
+
+// pidWait blocks for a process to exit.
+func pidWait(pid int) error {
+ ticker := time.NewTicker(1 * time.Second)
+ defer ticker.Stop()
+
+ for range ticker.C {
+ if !pidAlive(pid) {
+ break
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_posix.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_posix.go
new file mode 100644
index 00000000..bf3fc5b6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_posix.go
@@ -0,0 +1,23 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+//go:build !windows
+// +build !windows
+
+package cmdrunner
+
+import (
+ "os"
+ "syscall"
+)
+
+// _pidAlive tests whether a process is alive or not by sending it Signal 0,
+// since Go otherwise has no way to test this.
+func _pidAlive(pid int) bool {
+ proc, err := os.FindProcess(pid)
+ if err == nil {
+ err = proc.Signal(syscall.Signal(0))
+ }
+
+ return err == nil
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_windows.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_windows.go
new file mode 100644
index 00000000..6c39df28
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_windows.go
@@ -0,0 +1,33 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package cmdrunner
+
+import (
+ "syscall"
+)
+
+const (
+ // Weird name but matches the MSDN docs
+ exit_STILL_ACTIVE = 259
+
+ processDesiredAccess = syscall.STANDARD_RIGHTS_READ |
+ syscall.PROCESS_QUERY_INFORMATION |
+ syscall.SYNCHRONIZE
+)
+
+// _pidAlive tests whether a process is alive or not
+func _pidAlive(pid int) bool {
+ h, err := syscall.OpenProcess(processDesiredAccess, false, uint32(pid))
+ if err != nil {
+ return false
+ }
+ defer syscall.CloseHandle(h)
+
+ var ec uint32
+ if e := syscall.GetExitCodeProcess(h, &ec); e != nil {
+ return false
+ }
+
+ return ec == exit_STILL_ACTIVE
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_client_listener.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_client_listener.go
new file mode 100644
index 00000000..e8a3a152
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_client_listener.go
@@ -0,0 +1,51 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package grpcmux
+
+import (
+ "io"
+ "net"
+
+ "github.com/hashicorp/yamux"
+)
+
+var _ net.Listener = (*blockedClientListener)(nil)
+
+// blockedClientListener accepts connections for a specific gRPC broker stream
+// ID on the client (host) side of the connection.
+type blockedClientListener struct {
+ session *yamux.Session
+ waitCh chan struct{}
+ doneCh <-chan struct{}
+}
+
+func newBlockedClientListener(session *yamux.Session, doneCh <-chan struct{}) *blockedClientListener {
+ return &blockedClientListener{
+ waitCh: make(chan struct{}, 1),
+ doneCh: doneCh,
+ session: session,
+ }
+}
+
+func (b *blockedClientListener) Accept() (net.Conn, error) {
+ select {
+ case <-b.waitCh:
+ return b.session.Accept()
+ case <-b.doneCh:
+ return nil, io.EOF
+ }
+}
+
+func (b *blockedClientListener) Addr() net.Addr {
+ return b.session.Addr()
+}
+
+func (b *blockedClientListener) Close() error {
+ // We don't close the session; the client muxer is responsible for that.
+ return nil
+}
+
+func (b *blockedClientListener) unblock() {
+ b.waitCh <- struct{}{}
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_server_listener.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_server_listener.go
new file mode 100644
index 00000000..0edb2c05
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_server_listener.go
@@ -0,0 +1,49 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package grpcmux
+
+import (
+ "io"
+ "net"
+)
+
+var _ net.Listener = (*blockedServerListener)(nil)
+
+// blockedServerListener accepts connections for a specific gRPC broker stream
+// ID on the server (plugin) side of the connection.
+type blockedServerListener struct {
+ addr net.Addr
+ acceptCh chan acceptResult
+ doneCh <-chan struct{}
+}
+
+type acceptResult struct {
+ conn net.Conn
+ err error
+}
+
+func newBlockedServerListener(addr net.Addr, doneCh <-chan struct{}) *blockedServerListener {
+ return &blockedServerListener{
+ addr: addr,
+ acceptCh: make(chan acceptResult),
+ doneCh: doneCh,
+ }
+}
+
+func (b *blockedServerListener) Accept() (net.Conn, error) {
+ select {
+ case accept := <-b.acceptCh:
+ return accept.conn, accept.err
+ case <-b.doneCh:
+ return nil, io.EOF
+ }
+}
+
+func (b *blockedServerListener) Addr() net.Addr {
+ return b.addr
+}
+
+func (b *blockedServerListener) Close() error {
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_client_muxer.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_client_muxer.go
new file mode 100644
index 00000000..b203ba46
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_client_muxer.go
@@ -0,0 +1,105 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package grpcmux
+
+import (
+ "fmt"
+ "net"
+ "sync"
+
+ "github.com/hashicorp/go-hclog"
+ "github.com/hashicorp/yamux"
+)
+
+var _ GRPCMuxer = (*GRPCClientMuxer)(nil)
+
+// GRPCClientMuxer implements the client (host) side of the gRPC broker's
+// GRPCMuxer interface for multiplexing multiple gRPC broker connections over
+// a single net.Conn.
+//
+// The client dials the initial net.Conn eagerly, and creates a yamux.Session
+// as the implementation for multiplexing any additional connections.
+//
+// Each net.Listener returned from Listener will block until the client receives
+// a knock that matches its gRPC broker stream ID. There is no default listener
+// on the client, as it is a client for the gRPC broker's control services. (See
+// GRPCServerMuxer for more details).
+type GRPCClientMuxer struct {
+ logger hclog.Logger
+ session *yamux.Session
+
+ acceptMutex sync.Mutex
+ acceptListeners map[uint32]*blockedClientListener
+}
+
+func NewGRPCClientMuxer(logger hclog.Logger, addr net.Addr) (*GRPCClientMuxer, error) {
+ // Eagerly establish the underlying connection as early as possible.
+ logger.Debug("making new client mux initial connection", "addr", addr)
+ conn, err := net.Dial(addr.Network(), addr.String())
+ if err != nil {
+ return nil, err
+ }
+ if tcpConn, ok := conn.(*net.TCPConn); ok {
+ // Make sure to set keep alive so that the connection doesn't die
+ _ = tcpConn.SetKeepAlive(true)
+ }
+
+ cfg := yamux.DefaultConfig()
+ cfg.Logger = logger.Named("yamux").StandardLogger(&hclog.StandardLoggerOptions{
+ InferLevels: true,
+ })
+ cfg.LogOutput = nil
+ sess, err := yamux.Client(conn, cfg)
+ if err != nil {
+ return nil, err
+ }
+
+ logger.Debug("client muxer connected", "addr", addr)
+ m := &GRPCClientMuxer{
+ logger: logger,
+ session: sess,
+ acceptListeners: make(map[uint32]*blockedClientListener),
+ }
+
+ return m, nil
+}
+
+func (m *GRPCClientMuxer) Enabled() bool {
+ return m != nil
+}
+
+func (m *GRPCClientMuxer) Listener(id uint32, doneCh <-chan struct{}) (net.Listener, error) {
+ ln := newBlockedClientListener(m.session, doneCh)
+
+ m.acceptMutex.Lock()
+ m.acceptListeners[id] = ln
+ m.acceptMutex.Unlock()
+
+ return ln, nil
+}
+
+func (m *GRPCClientMuxer) AcceptKnock(id uint32) error {
+ m.acceptMutex.Lock()
+ defer m.acceptMutex.Unlock()
+
+ ln, ok := m.acceptListeners[id]
+ if !ok {
+ return fmt.Errorf("no listener for id %d", id)
+ }
+ ln.unblock()
+ return nil
+}
+
+func (m *GRPCClientMuxer) Dial() (net.Conn, error) {
+ stream, err := m.session.Open()
+ if err != nil {
+ return nil, fmt.Errorf("error dialling new client stream: %w", err)
+ }
+
+ return stream, nil
+}
+
+func (m *GRPCClientMuxer) Close() error {
+ return m.session.Close()
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_muxer.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_muxer.go
new file mode 100644
index 00000000..c52aaf55
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_muxer.go
@@ -0,0 +1,41 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package grpcmux
+
+import (
+ "net"
+)
+
+// GRPCMuxer enables multiple implementations of net.Listener to accept
+// connections over a single "main" multiplexed net.Conn, and dial multiple
+// client connections over the same multiplexed net.Conn.
+//
+// The first multiplexed connection is used to serve the gRPC broker's own
+// control services: plugin.GRPCBroker, plugin.GRPCController, plugin.GRPCStdio.
+//
+// Clients must "knock" before dialling, to tell the server side that the
+// next net.Conn should be accepted onto a specific stream ID. The knock is a
+// bidirectional streaming message on the plugin.GRPCBroker service.
+type GRPCMuxer interface {
+ // Enabled determines whether multiplexing should be used. It saves users
+ // of the interface from having to compare an interface with nil, which
+ // is a bit awkward to do correctly.
+ Enabled() bool
+
+ // Listener returns a multiplexed listener that will wait until AcceptKnock
+ // is called with a matching ID before its Accept function returns.
+ Listener(id uint32, doneCh <-chan struct{}) (net.Listener, error)
+
+ // AcceptKnock unblocks the listener with the matching ID, and returns an
+ // error if it hasn't been created yet.
+ AcceptKnock(id uint32) error
+
+ // Dial makes a new multiplexed client connection. To dial a specific ID,
+ // a knock must be sent first.
+ Dial() (net.Conn, error)
+
+ // Close closes connections and releases any resources associated with the
+ // muxer.
+ Close() error
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_server_muxer.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_server_muxer.go
new file mode 100644
index 00000000..27696ee7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_server_muxer.go
@@ -0,0 +1,190 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package grpcmux
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "sync"
+ "time"
+
+ "github.com/hashicorp/go-hclog"
+ "github.com/hashicorp/yamux"
+)
+
+var _ GRPCMuxer = (*GRPCServerMuxer)(nil)
+var _ net.Listener = (*GRPCServerMuxer)(nil)
+
+// GRPCServerMuxer implements the server (plugin) side of the gRPC broker's
+// GRPCMuxer interface for multiplexing multiple gRPC broker connections over
+// a single net.Conn.
+//
+// The server side needs a listener to serve the gRPC broker's control services,
+// which includes the service we will receive knocks on. That means we always
+// accept the first connection onto a "default" main listener, and if we accept
+// any further connections without receiving a knock first, they are also given
+// to the default listener.
+//
+// When creating additional multiplexed listeners for specific stream IDs, we
+// can't control the order in which gRPC servers will call Accept() on each
+// listener, but we do need to control which gRPC server accepts which connection.
+// As such, each multiplexed listener blocks waiting on a channel. It will be
+// unblocked when a knock is received for the matching stream ID.
+type GRPCServerMuxer struct {
+ addr net.Addr
+ logger hclog.Logger
+
+ sessionErrCh chan error
+ sess *yamux.Session
+
+ knockCh chan uint32
+
+ acceptMutex sync.Mutex
+ acceptChannels map[uint32]chan acceptResult
+}
+
+func NewGRPCServerMuxer(logger hclog.Logger, ln net.Listener) *GRPCServerMuxer {
+ m := &GRPCServerMuxer{
+ addr: ln.Addr(),
+ logger: logger,
+
+ sessionErrCh: make(chan error),
+
+ knockCh: make(chan uint32, 1),
+ acceptChannels: make(map[uint32]chan acceptResult),
+ }
+
+ go m.acceptSession(ln)
+
+ return m
+}
+
+// acceptSession accepts the initial connection from the given listener and
+// establishes the yamux session used to multiplex any further connections.
+func (m *GRPCServerMuxer) acceptSession(ln net.Listener) {
+ defer close(m.sessionErrCh)
+
+ m.logger.Debug("accepting initial connection", "addr", m.addr)
+ conn, err := ln.Accept()
+ if err != nil {
+ m.sessionErrCh <- err
+ return
+ }
+
+ m.logger.Debug("initial server connection accepted", "addr", m.addr)
+ cfg := yamux.DefaultConfig()
+ cfg.Logger = m.logger.Named("yamux").StandardLogger(&hclog.StandardLoggerOptions{
+ InferLevels: true,
+ })
+ cfg.LogOutput = nil
+ m.sess, err = yamux.Server(conn, cfg)
+ if err != nil {
+ m.sessionErrCh <- err
+ return
+ }
+}
+
+func (m *GRPCServerMuxer) session() (*yamux.Session, error) {
+ select {
+ case err := <-m.sessionErrCh:
+ if err != nil {
+ return nil, err
+ }
+ case <-time.After(5 * time.Second):
+ return nil, errors.New("timed out waiting for connection to be established")
+ }
+
+ // Should never happen.
+ if m.sess == nil {
+ return nil, errors.New("no connection established and no error received")
+ }
+
+ return m.sess, nil
+}
+
+// Accept accepts all incoming connections and routes them to the correct
+// stream ID based on the most recent knock received.
+func (m *GRPCServerMuxer) Accept() (net.Conn, error) {
+ session, err := m.session()
+ if err != nil {
+ return nil, fmt.Errorf("error establishing yamux session: %w", err)
+ }
+
+ for {
+ conn, acceptErr := session.Accept()
+
+ select {
+ case id := <-m.knockCh:
+ m.acceptMutex.Lock()
+ acceptCh, ok := m.acceptChannels[id]
+ m.acceptMutex.Unlock()
+
+ if !ok {
+ if conn != nil {
+ _ = conn.Close()
+ }
+ return nil, fmt.Errorf("received knock on ID %d that doesn't have a listener", id)
+ }
+ m.logger.Debug("sending conn to brokered listener", "id", id)
+ acceptCh <- acceptResult{
+ conn: conn,
+ err: acceptErr,
+ }
+ default:
+ m.logger.Debug("sending conn to default listener")
+ return conn, acceptErr
+ }
+ }
+}
+
+func (m *GRPCServerMuxer) Addr() net.Addr {
+ return m.addr
+}
+
+func (m *GRPCServerMuxer) Close() error {
+ session, err := m.session()
+ if err != nil {
+ return err
+ }
+
+ return session.Close()
+}
+
+func (m *GRPCServerMuxer) Enabled() bool {
+ return m != nil
+}
+
+func (m *GRPCServerMuxer) Listener(id uint32, doneCh <-chan struct{}) (net.Listener, error) {
+ sess, err := m.session()
+ if err != nil {
+ return nil, err
+ }
+
+ ln := newBlockedServerListener(sess.Addr(), doneCh)
+ m.acceptMutex.Lock()
+ m.acceptChannels[id] = ln.acceptCh
+ m.acceptMutex.Unlock()
+
+ return ln, nil
+}
+
+func (m *GRPCServerMuxer) Dial() (net.Conn, error) {
+ sess, err := m.session()
+ if err != nil {
+ return nil, err
+ }
+
+ stream, err := sess.OpenStream()
+ if err != nil {
+ return nil, fmt.Errorf("error dialling new server stream: %w", err)
+ }
+
+ return stream, nil
+}
+
+func (m *GRPCServerMuxer) AcceptKnock(id uint32) error {
+ m.knockCh <- id
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go
new file mode 100644
index 00000000..acc6dc9c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go
@@ -0,0 +1,264 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.31.0
+// protoc (unknown)
+// source: internal/plugin/grpc_broker.proto
+
+package plugin
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ConnInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ServiceId uint32 `protobuf:"varint,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+ Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"`
+ Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"`
+ Knock *ConnInfo_Knock `protobuf:"bytes,4,opt,name=knock,proto3" json:"knock,omitempty"`
+}
+
+func (x *ConnInfo) Reset() {
+ *x = ConnInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_internal_plugin_grpc_broker_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ConnInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ConnInfo) ProtoMessage() {}
+
+func (x *ConnInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_internal_plugin_grpc_broker_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ConnInfo.ProtoReflect.Descriptor instead.
+func (*ConnInfo) Descriptor() ([]byte, []int) {
+ return file_internal_plugin_grpc_broker_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ConnInfo) GetServiceId() uint32 {
+ if x != nil {
+ return x.ServiceId
+ }
+ return 0
+}
+
+func (x *ConnInfo) GetNetwork() string {
+ if x != nil {
+ return x.Network
+ }
+ return ""
+}
+
+func (x *ConnInfo) GetAddress() string {
+ if x != nil {
+ return x.Address
+ }
+ return ""
+}
+
+func (x *ConnInfo) GetKnock() *ConnInfo_Knock {
+ if x != nil {
+ return x.Knock
+ }
+ return nil
+}
+
+type ConnInfo_Knock struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Knock bool `protobuf:"varint,1,opt,name=knock,proto3" json:"knock,omitempty"`
+ Ack bool `protobuf:"varint,2,opt,name=ack,proto3" json:"ack,omitempty"`
+ Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"`
+}
+
+func (x *ConnInfo_Knock) Reset() {
+ *x = ConnInfo_Knock{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_internal_plugin_grpc_broker_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ConnInfo_Knock) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ConnInfo_Knock) ProtoMessage() {}
+
+func (x *ConnInfo_Knock) ProtoReflect() protoreflect.Message {
+ mi := &file_internal_plugin_grpc_broker_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ConnInfo_Knock.ProtoReflect.Descriptor instead.
+func (*ConnInfo_Knock) Descriptor() ([]byte, []int) {
+ return file_internal_plugin_grpc_broker_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *ConnInfo_Knock) GetKnock() bool {
+ if x != nil {
+ return x.Knock
+ }
+ return false
+}
+
+func (x *ConnInfo_Knock) GetAck() bool {
+ if x != nil {
+ return x.Ack
+ }
+ return false
+}
+
+func (x *ConnInfo_Knock) GetError() string {
+ if x != nil {
+ return x.Error
+ }
+ return ""
+}
+
+var File_internal_plugin_grpc_broker_proto protoreflect.FileDescriptor
+
+var file_internal_plugin_grpc_broker_proto_rawDesc = []byte{
+ 0x0a, 0x21, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69,
+ 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x22, 0xd2, 0x01, 0x0a, 0x08,
+ 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x73, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f,
+ 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72,
+ 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2c, 0x0a, 0x05, 0x6b,
+ 0x6e, 0x6f, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x6c, 0x75,
+ 0x67, 0x69, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4b, 0x6e, 0x6f,
+ 0x63, 0x6b, 0x52, 0x05, 0x6b, 0x6e, 0x6f, 0x63, 0x6b, 0x1a, 0x45, 0x0a, 0x05, 0x4b, 0x6e, 0x6f,
+ 0x63, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x6b, 0x6e, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x05, 0x6b, 0x6e, 0x6f, 0x63, 0x6b, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x63, 0x6b, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x61, 0x63, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72,
+ 0x32, 0x43, 0x0a, 0x0a, 0x47, 0x52, 0x50, 0x43, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x12, 0x35,
+ 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x10, 0x2e,
+ 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x1a,
+ 0x10, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66,
+ 0x6f, 0x28, 0x01, 0x30, 0x01, 0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69,
+ 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_internal_plugin_grpc_broker_proto_rawDescOnce sync.Once
+ file_internal_plugin_grpc_broker_proto_rawDescData = file_internal_plugin_grpc_broker_proto_rawDesc
+)
+
+func file_internal_plugin_grpc_broker_proto_rawDescGZIP() []byte {
+ file_internal_plugin_grpc_broker_proto_rawDescOnce.Do(func() {
+ file_internal_plugin_grpc_broker_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_plugin_grpc_broker_proto_rawDescData)
+ })
+ return file_internal_plugin_grpc_broker_proto_rawDescData
+}
+
+var file_internal_plugin_grpc_broker_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_internal_plugin_grpc_broker_proto_goTypes = []interface{}{
+ (*ConnInfo)(nil), // 0: plugin.ConnInfo
+ (*ConnInfo_Knock)(nil), // 1: plugin.ConnInfo.Knock
+}
+var file_internal_plugin_grpc_broker_proto_depIdxs = []int32{
+ 1, // 0: plugin.ConnInfo.knock:type_name -> plugin.ConnInfo.Knock
+ 0, // 1: plugin.GRPCBroker.StartStream:input_type -> plugin.ConnInfo
+ 0, // 2: plugin.GRPCBroker.StartStream:output_type -> plugin.ConnInfo
+ 2, // [2:3] is the sub-list for method output_type
+ 1, // [1:2] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_internal_plugin_grpc_broker_proto_init() }
+func file_internal_plugin_grpc_broker_proto_init() {
+ if File_internal_plugin_grpc_broker_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_internal_plugin_grpc_broker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ConnInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_internal_plugin_grpc_broker_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ConnInfo_Knock); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_internal_plugin_grpc_broker_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_internal_plugin_grpc_broker_proto_goTypes,
+ DependencyIndexes: file_internal_plugin_grpc_broker_proto_depIdxs,
+ MessageInfos: file_internal_plugin_grpc_broker_proto_msgTypes,
+ }.Build()
+ File_internal_plugin_grpc_broker_proto = out.File
+ file_internal_plugin_grpc_broker_proto_rawDesc = nil
+ file_internal_plugin_grpc_broker_proto_goTypes = nil
+ file_internal_plugin_grpc_broker_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto
new file mode 100644
index 00000000..c92cd645
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto
@@ -0,0 +1,22 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+syntax = "proto3";
+package plugin;
+option go_package = "./plugin";
+
+message ConnInfo {
+ uint32 service_id = 1;
+ string network = 2;
+ string address = 3;
+ message Knock {
+ bool knock = 1;
+ bool ack = 2;
+ string error = 3;
+ }
+ Knock knock = 4;
+}
+
+service GRPCBroker {
+ rpc StartStream(stream ConnInfo) returns (stream ConnInfo);
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker_grpc.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker_grpc.pb.go
new file mode 100644
index 00000000..1b0f8070
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker_grpc.pb.go
@@ -0,0 +1,142 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.3.0
+// - protoc (unknown)
+// source: internal/plugin/grpc_broker.proto
+
+package plugin
+
+import (
+ context "context"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+const (
+ GRPCBroker_StartStream_FullMethodName = "/plugin.GRPCBroker/StartStream"
+)
+
+// GRPCBrokerClient is the client API for GRPCBroker service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type GRPCBrokerClient interface {
+ StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error)
+}
+
+type gRPCBrokerClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewGRPCBrokerClient(cc grpc.ClientConnInterface) GRPCBrokerClient {
+ return &gRPCBrokerClient{cc}
+}
+
+func (c *gRPCBrokerClient) StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) {
+ stream, err := c.cc.NewStream(ctx, &GRPCBroker_ServiceDesc.Streams[0], GRPCBroker_StartStream_FullMethodName, opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &gRPCBrokerStartStreamClient{stream}
+ return x, nil
+}
+
+type GRPCBroker_StartStreamClient interface {
+ Send(*ConnInfo) error
+ Recv() (*ConnInfo, error)
+ grpc.ClientStream
+}
+
+type gRPCBrokerStartStreamClient struct {
+ grpc.ClientStream
+}
+
+func (x *gRPCBrokerStartStreamClient) Send(m *ConnInfo) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *gRPCBrokerStartStreamClient) Recv() (*ConnInfo, error) {
+ m := new(ConnInfo)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// GRPCBrokerServer is the server API for GRPCBroker service.
+// All implementations should embed UnimplementedGRPCBrokerServer
+// for forward compatibility
+type GRPCBrokerServer interface {
+ StartStream(GRPCBroker_StartStreamServer) error
+}
+
+// UnimplementedGRPCBrokerServer should be embedded to have forward compatible implementations.
+type UnimplementedGRPCBrokerServer struct {
+}
+
+func (UnimplementedGRPCBrokerServer) StartStream(GRPCBroker_StartStreamServer) error {
+ return status.Errorf(codes.Unimplemented, "method StartStream not implemented")
+}
+
+// UnsafeGRPCBrokerServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to GRPCBrokerServer will
+// result in compilation errors.
+type UnsafeGRPCBrokerServer interface {
+ mustEmbedUnimplementedGRPCBrokerServer()
+}
+
+func RegisterGRPCBrokerServer(s grpc.ServiceRegistrar, srv GRPCBrokerServer) {
+ s.RegisterService(&GRPCBroker_ServiceDesc, srv)
+}
+
+func _GRPCBroker_StartStream_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(GRPCBrokerServer).StartStream(&gRPCBrokerStartStreamServer{stream})
+}
+
+type GRPCBroker_StartStreamServer interface {
+ Send(*ConnInfo) error
+ Recv() (*ConnInfo, error)
+ grpc.ServerStream
+}
+
+type gRPCBrokerStartStreamServer struct {
+ grpc.ServerStream
+}
+
+func (x *gRPCBrokerStartStreamServer) Send(m *ConnInfo) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *gRPCBrokerStartStreamServer) Recv() (*ConnInfo, error) {
+ m := new(ConnInfo)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// GRPCBroker_ServiceDesc is the grpc.ServiceDesc for GRPCBroker service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var GRPCBroker_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "plugin.GRPCBroker",
+ HandlerType: (*GRPCBrokerServer)(nil),
+ Methods: []grpc.MethodDesc{},
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "StartStream",
+ Handler: _GRPCBroker_StartStream_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ },
+ Metadata: "internal/plugin/grpc_broker.proto",
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go
new file mode 100644
index 00000000..8ca48e0d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go
@@ -0,0 +1,141 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.31.0
+// protoc (unknown)
+// source: internal/plugin/grpc_controller.proto
+
+package plugin
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Empty struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *Empty) Reset() {
+ *x = Empty{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_internal_plugin_grpc_controller_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Empty) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Empty) ProtoMessage() {}
+
+func (x *Empty) ProtoReflect() protoreflect.Message {
+ mi := &file_internal_plugin_grpc_controller_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Empty.ProtoReflect.Descriptor instead.
+func (*Empty) Descriptor() ([]byte, []int) {
+ return file_internal_plugin_grpc_controller_proto_rawDescGZIP(), []int{0}
+}
+
+var File_internal_plugin_grpc_controller_proto protoreflect.FileDescriptor
+
+var file_internal_plugin_grpc_controller_proto_rawDesc = []byte{
+ 0x0a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69,
+ 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65,
+ 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x22,
+ 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x32, 0x3a, 0x0a, 0x0e, 0x47, 0x52, 0x50, 0x43,
+ 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x08, 0x53, 0x68,
+ 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x0d, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e,
+ 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x0d, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x45,
+ 0x6d, 0x70, 0x74, 0x79, 0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_internal_plugin_grpc_controller_proto_rawDescOnce sync.Once
+ file_internal_plugin_grpc_controller_proto_rawDescData = file_internal_plugin_grpc_controller_proto_rawDesc
+)
+
+func file_internal_plugin_grpc_controller_proto_rawDescGZIP() []byte {
+ file_internal_plugin_grpc_controller_proto_rawDescOnce.Do(func() {
+ file_internal_plugin_grpc_controller_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_plugin_grpc_controller_proto_rawDescData)
+ })
+ return file_internal_plugin_grpc_controller_proto_rawDescData
+}
+
+var file_internal_plugin_grpc_controller_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_internal_plugin_grpc_controller_proto_goTypes = []interface{}{
+ (*Empty)(nil), // 0: plugin.Empty
+}
+var file_internal_plugin_grpc_controller_proto_depIdxs = []int32{
+ 0, // 0: plugin.GRPCController.Shutdown:input_type -> plugin.Empty
+ 0, // 1: plugin.GRPCController.Shutdown:output_type -> plugin.Empty
+ 1, // [1:2] is the sub-list for method output_type
+ 0, // [0:1] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_internal_plugin_grpc_controller_proto_init() }
+func file_internal_plugin_grpc_controller_proto_init() {
+ if File_internal_plugin_grpc_controller_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_internal_plugin_grpc_controller_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Empty); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_internal_plugin_grpc_controller_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_internal_plugin_grpc_controller_proto_goTypes,
+ DependencyIndexes: file_internal_plugin_grpc_controller_proto_depIdxs,
+ MessageInfos: file_internal_plugin_grpc_controller_proto_msgTypes,
+ }.Build()
+ File_internal_plugin_grpc_controller_proto = out.File
+ file_internal_plugin_grpc_controller_proto_rawDesc = nil
+ file_internal_plugin_grpc_controller_proto_goTypes = nil
+ file_internal_plugin_grpc_controller_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto
new file mode 100644
index 00000000..2755fa63
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto
@@ -0,0 +1,14 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+syntax = "proto3";
+package plugin;
+option go_package = "./plugin";
+
+message Empty {
+}
+
+// The GRPCController is responsible for telling the plugin server to shutdown.
+service GRPCController {
+ rpc Shutdown(Empty) returns (Empty);
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller_grpc.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller_grpc.pb.go
new file mode 100644
index 00000000..427611aa
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller_grpc.pb.go
@@ -0,0 +1,110 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.3.0
+// - protoc (unknown)
+// source: internal/plugin/grpc_controller.proto
+
+package plugin
+
+import (
+ context "context"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+const (
+ GRPCController_Shutdown_FullMethodName = "/plugin.GRPCController/Shutdown"
+)
+
+// GRPCControllerClient is the client API for GRPCController service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type GRPCControllerClient interface {
+ Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error)
+}
+
+type gRPCControllerClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewGRPCControllerClient(cc grpc.ClientConnInterface) GRPCControllerClient {
+ return &gRPCControllerClient{cc}
+}
+
+func (c *gRPCControllerClient) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) {
+ out := new(Empty)
+ err := c.cc.Invoke(ctx, GRPCController_Shutdown_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// GRPCControllerServer is the server API for GRPCController service.
+// All implementations should embed UnimplementedGRPCControllerServer
+// for forward compatibility
+type GRPCControllerServer interface {
+ Shutdown(context.Context, *Empty) (*Empty, error)
+}
+
+// UnimplementedGRPCControllerServer should be embedded to have forward compatible implementations.
+type UnimplementedGRPCControllerServer struct {
+}
+
+func (UnimplementedGRPCControllerServer) Shutdown(context.Context, *Empty) (*Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Shutdown not implemented")
+}
+
+// UnsafeGRPCControllerServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to GRPCControllerServer will
+// result in compilation errors.
+type UnsafeGRPCControllerServer interface {
+ mustEmbedUnimplementedGRPCControllerServer()
+}
+
+func RegisterGRPCControllerServer(s grpc.ServiceRegistrar, srv GRPCControllerServer) {
+ s.RegisterService(&GRPCController_ServiceDesc, srv)
+}
+
+func _GRPCController_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(Empty)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GRPCControllerServer).Shutdown(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: GRPCController_Shutdown_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GRPCControllerServer).Shutdown(ctx, req.(*Empty))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// GRPCController_ServiceDesc is the grpc.ServiceDesc for GRPCController service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var GRPCController_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "plugin.GRPCController",
+ HandlerType: (*GRPCControllerServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Shutdown",
+ Handler: _GRPCController_Shutdown_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "internal/plugin/grpc_controller.proto",
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go
new file mode 100644
index 00000000..139cbb4a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go
@@ -0,0 +1,225 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.31.0
+// protoc (unknown)
+// source: internal/plugin/grpc_stdio.proto
+
+package plugin
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type StdioData_Channel int32
+
+const (
+ StdioData_INVALID StdioData_Channel = 0
+ StdioData_STDOUT StdioData_Channel = 1
+ StdioData_STDERR StdioData_Channel = 2
+)
+
+// Enum value maps for StdioData_Channel.
+var (
+ StdioData_Channel_name = map[int32]string{
+ 0: "INVALID",
+ 1: "STDOUT",
+ 2: "STDERR",
+ }
+ StdioData_Channel_value = map[string]int32{
+ "INVALID": 0,
+ "STDOUT": 1,
+ "STDERR": 2,
+ }
+)
+
+func (x StdioData_Channel) Enum() *StdioData_Channel {
+ p := new(StdioData_Channel)
+ *p = x
+ return p
+}
+
+func (x StdioData_Channel) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (StdioData_Channel) Descriptor() protoreflect.EnumDescriptor {
+ return file_internal_plugin_grpc_stdio_proto_enumTypes[0].Descriptor()
+}
+
+func (StdioData_Channel) Type() protoreflect.EnumType {
+ return &file_internal_plugin_grpc_stdio_proto_enumTypes[0]
+}
+
+func (x StdioData_Channel) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use StdioData_Channel.Descriptor instead.
+func (StdioData_Channel) EnumDescriptor() ([]byte, []int) {
+ return file_internal_plugin_grpc_stdio_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// StdioData is a single chunk of stdout or stderr data that is streamed
+// from GRPCStdio.
+type StdioData struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Channel StdioData_Channel `protobuf:"varint,1,opt,name=channel,proto3,enum=plugin.StdioData_Channel" json:"channel,omitempty"`
+ Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (x *StdioData) Reset() {
+ *x = StdioData{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_internal_plugin_grpc_stdio_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StdioData) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StdioData) ProtoMessage() {}
+
+func (x *StdioData) ProtoReflect() protoreflect.Message {
+ mi := &file_internal_plugin_grpc_stdio_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StdioData.ProtoReflect.Descriptor instead.
+func (*StdioData) Descriptor() ([]byte, []int) {
+ return file_internal_plugin_grpc_stdio_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *StdioData) GetChannel() StdioData_Channel {
+ if x != nil {
+ return x.Channel
+ }
+ return StdioData_INVALID
+}
+
+func (x *StdioData) GetData() []byte {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+var File_internal_plugin_grpc_stdio_proto protoreflect.FileDescriptor
+
+var file_internal_plugin_grpc_stdio_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69,
+ 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x74, 0x64, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74,
+ 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x84, 0x01, 0x0a, 0x09, 0x53, 0x74, 0x64, 0x69,
+ 0x6f, 0x44, 0x61, 0x74, 0x61, 0x12, 0x33, 0x0a, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e,
+ 0x53, 0x74, 0x64, 0x69, 0x6f, 0x44, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65,
+ 0x6c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61,
+ 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x2e,
+ 0x0a, 0x07, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56,
+ 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x44, 0x4f, 0x55, 0x54,
+ 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x44, 0x45, 0x52, 0x52, 0x10, 0x02, 0x32, 0x47,
+ 0x0a, 0x09, 0x47, 0x52, 0x50, 0x43, 0x53, 0x74, 0x64, 0x69, 0x6f, 0x12, 0x3a, 0x0a, 0x0b, 0x53,
+ 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x64, 0x69, 0x6f, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70,
+ 0x74, 0x79, 0x1a, 0x11, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x64, 0x69,
+ 0x6f, 0x44, 0x61, 0x74, 0x61, 0x30, 0x01, 0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x6c, 0x75,
+ 0x67, 0x69, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_internal_plugin_grpc_stdio_proto_rawDescOnce sync.Once
+ file_internal_plugin_grpc_stdio_proto_rawDescData = file_internal_plugin_grpc_stdio_proto_rawDesc
+)
+
+func file_internal_plugin_grpc_stdio_proto_rawDescGZIP() []byte {
+ file_internal_plugin_grpc_stdio_proto_rawDescOnce.Do(func() {
+ file_internal_plugin_grpc_stdio_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_plugin_grpc_stdio_proto_rawDescData)
+ })
+ return file_internal_plugin_grpc_stdio_proto_rawDescData
+}
+
+var file_internal_plugin_grpc_stdio_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_internal_plugin_grpc_stdio_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_internal_plugin_grpc_stdio_proto_goTypes = []interface{}{
+ (StdioData_Channel)(0), // 0: plugin.StdioData.Channel
+ (*StdioData)(nil), // 1: plugin.StdioData
+ (*emptypb.Empty)(nil), // 2: google.protobuf.Empty
+}
+var file_internal_plugin_grpc_stdio_proto_depIdxs = []int32{
+ 0, // 0: plugin.StdioData.channel:type_name -> plugin.StdioData.Channel
+ 2, // 1: plugin.GRPCStdio.StreamStdio:input_type -> google.protobuf.Empty
+ 1, // 2: plugin.GRPCStdio.StreamStdio:output_type -> plugin.StdioData
+ 2, // [2:3] is the sub-list for method output_type
+ 1, // [1:2] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_internal_plugin_grpc_stdio_proto_init() }
+func file_internal_plugin_grpc_stdio_proto_init() {
+ if File_internal_plugin_grpc_stdio_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_internal_plugin_grpc_stdio_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StdioData); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_internal_plugin_grpc_stdio_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_internal_plugin_grpc_stdio_proto_goTypes,
+ DependencyIndexes: file_internal_plugin_grpc_stdio_proto_depIdxs,
+ EnumInfos: file_internal_plugin_grpc_stdio_proto_enumTypes,
+ MessageInfos: file_internal_plugin_grpc_stdio_proto_msgTypes,
+ }.Build()
+ File_internal_plugin_grpc_stdio_proto = out.File
+ file_internal_plugin_grpc_stdio_proto_rawDesc = nil
+ file_internal_plugin_grpc_stdio_proto_goTypes = nil
+ file_internal_plugin_grpc_stdio_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto
new file mode 100644
index 00000000..f48ac76c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto
@@ -0,0 +1,33 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+syntax = "proto3";
+package plugin;
+option go_package = "./plugin";
+
+import "google/protobuf/empty.proto";
+
+// GRPCStdio is a service that is automatically run by the plugin process
+// to stream any stdout/err data so that it can be mirrored on the plugin
+// host side.
+service GRPCStdio {
+ // StreamStdio returns a stream that contains all the stdout/stderr.
+ // This RPC endpoint must only be called ONCE. Once stdio data is consumed
+ // it is not sent again.
+ //
+ // Callers should connect early to prevent blocking on the plugin process.
+ rpc StreamStdio(google.protobuf.Empty) returns (stream StdioData);
+}
+
+// StdioData is a single chunk of stdout or stderr data that is streamed
+// from GRPCStdio.
+message StdioData {
+ enum Channel {
+ INVALID = 0;
+ STDOUT = 1;
+ STDERR = 2;
+ }
+
+ Channel channel = 1;
+ bytes data = 2;
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio_grpc.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio_grpc.pb.go
new file mode 100644
index 00000000..f82b1503
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio_grpc.pb.go
@@ -0,0 +1,148 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.3.0
+// - protoc (unknown)
+// source: internal/plugin/grpc_stdio.proto
+
+package plugin
+
+import (
+ context "context"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+const (
+ GRPCStdio_StreamStdio_FullMethodName = "/plugin.GRPCStdio/StreamStdio"
+)
+
+// GRPCStdioClient is the client API for GRPCStdio service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type GRPCStdioClient interface {
+ // StreamStdio returns a stream that contains all the stdout/stderr.
+ // This RPC endpoint must only be called ONCE. Once stdio data is consumed
+ // it is not sent again.
+ //
+ // Callers should connect early to prevent blocking on the plugin process.
+ StreamStdio(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error)
+}
+
+type gRPCStdioClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewGRPCStdioClient(cc grpc.ClientConnInterface) GRPCStdioClient {
+ return &gRPCStdioClient{cc}
+}
+
+func (c *gRPCStdioClient) StreamStdio(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) {
+ stream, err := c.cc.NewStream(ctx, &GRPCStdio_ServiceDesc.Streams[0], GRPCStdio_StreamStdio_FullMethodName, opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &gRPCStdioStreamStdioClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type GRPCStdio_StreamStdioClient interface {
+ Recv() (*StdioData, error)
+ grpc.ClientStream
+}
+
+type gRPCStdioStreamStdioClient struct {
+ grpc.ClientStream
+}
+
+func (x *gRPCStdioStreamStdioClient) Recv() (*StdioData, error) {
+ m := new(StdioData)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// GRPCStdioServer is the server API for GRPCStdio service.
+// All implementations should embed UnimplementedGRPCStdioServer
+// for forward compatibility
+type GRPCStdioServer interface {
+ // StreamStdio returns a stream that contains all the stdout/stderr.
+ // This RPC endpoint must only be called ONCE. Once stdio data is consumed
+ // it is not sent again.
+ //
+ // Callers should connect early to prevent blocking on the plugin process.
+ StreamStdio(*emptypb.Empty, GRPCStdio_StreamStdioServer) error
+}
+
+// UnimplementedGRPCStdioServer should be embedded to have forward compatible implementations.
+type UnimplementedGRPCStdioServer struct {
+}
+
+func (UnimplementedGRPCStdioServer) StreamStdio(*emptypb.Empty, GRPCStdio_StreamStdioServer) error {
+ return status.Errorf(codes.Unimplemented, "method StreamStdio not implemented")
+}
+
+// UnsafeGRPCStdioServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to GRPCStdioServer will
+// result in compilation errors.
+type UnsafeGRPCStdioServer interface {
+ mustEmbedUnimplementedGRPCStdioServer()
+}
+
+func RegisterGRPCStdioServer(s grpc.ServiceRegistrar, srv GRPCStdioServer) {
+ s.RegisterService(&GRPCStdio_ServiceDesc, srv)
+}
+
+func _GRPCStdio_StreamStdio_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(emptypb.Empty)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(GRPCStdioServer).StreamStdio(m, &gRPCStdioStreamStdioServer{stream})
+}
+
+type GRPCStdio_StreamStdioServer interface {
+ Send(*StdioData) error
+ grpc.ServerStream
+}
+
+type gRPCStdioStreamStdioServer struct {
+ grpc.ServerStream
+}
+
+func (x *gRPCStdioStreamStdioServer) Send(m *StdioData) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+// GRPCStdio_ServiceDesc is the grpc.ServiceDesc for GRPCStdio service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var GRPCStdio_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "plugin.GRPCStdio",
+ HandlerType: (*GRPCStdioServer)(nil),
+ Methods: []grpc.MethodDesc{},
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "StreamStdio",
+ Handler: _GRPCStdio_StreamStdio_Handler,
+ ServerStreams: true,
+ },
+ },
+ Metadata: "internal/plugin/grpc_stdio.proto",
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/log_entry.go b/vendor/github.com/hashicorp/go-plugin/log_entry.go
new file mode 100644
index 00000000..ab963d56
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/log_entry.go
@@ -0,0 +1,76 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+ "encoding/json"
+ "time"
+)
+
+// logEntry is the JSON payload that gets sent to Stderr from the plugin to the host
+type logEntry struct {
+ Message string `json:"@message"`
+ Level string `json:"@level"`
+ Timestamp time.Time `json:"timestamp"`
+ KVPairs []*logEntryKV `json:"kv_pairs"`
+}
+
+// logEntryKV is a key value pair within the Output payload
+type logEntryKV struct {
+ Key string `json:"key"`
+ Value interface{} `json:"value"`
+}
+
+// flattenKVPairs is used to flatten KVPair slice into []interface{}
+// for hclog consumption.
+func flattenKVPairs(kvs []*logEntryKV) []interface{} {
+ var result []interface{}
+ for _, kv := range kvs {
+ result = append(result, kv.Key)
+ result = append(result, kv.Value)
+ }
+
+ return result
+}
+
+// parseJSON handles parsing JSON output
+func parseJSON(input []byte) (*logEntry, error) {
+ var raw map[string]interface{}
+ entry := &logEntry{}
+
+ err := json.Unmarshal(input, &raw)
+ if err != nil {
+ return nil, err
+ }
+
+ // Parse hclog-specific objects
+ if v, ok := raw["@message"]; ok {
+ entry.Message = v.(string)
+ delete(raw, "@message")
+ }
+
+ if v, ok := raw["@level"]; ok {
+ entry.Level = v.(string)
+ delete(raw, "@level")
+ }
+
+ if v, ok := raw["@timestamp"]; ok {
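+		// Parse the timestamp using an RFC3339-style layout with microsecond
+		// precision, matching the format hclog uses for its JSON output.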
+ t, err := time.Parse("2006-01-02T15:04:05.000000Z07:00", v.(string))
+ if err != nil {
+ return nil, err
+ }
+ entry.Timestamp = t
+ delete(raw, "@timestamp")
+ }
+
+ // Parse dynamic KV args from the hclog payload.
+ for k, v := range raw {
+ entry.KVPairs = append(entry.KVPairs, &logEntryKV{
+ Key: k,
+ Value: v,
+ })
+ }
+
+ return entry, nil
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/mtls.go b/vendor/github.com/hashicorp/go-plugin/mtls.go
new file mode 100644
index 00000000..09ecafaf
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/mtls.go
@@ -0,0 +1,76 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+ "bytes"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/pem"
+ "math/big"
+ "time"
+)
+
+// generateCert generates a temporary certificate for plugin authentication. The
+// certificate and private key are returned in PEM format.
+func generateCert() (cert []byte, privateKey []byte, err error) {
+ key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
+ sn, err := rand.Int(rand.Reader, serialNumberLimit)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ host := "localhost"
+
+ template := &x509.Certificate{
+ Subject: pkix.Name{
+ CommonName: host,
+ Organization: []string{"HashiCorp"},
+ },
+ DNSNames: []string{host},
+ ExtKeyUsage: []x509.ExtKeyUsage{
+ x509.ExtKeyUsageClientAuth,
+ x509.ExtKeyUsageServerAuth,
+ },
+ KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign,
+ BasicConstraintsValid: true,
+ SerialNumber: sn,
+ NotBefore: time.Now().Add(-30 * time.Second),
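+		// 262980 hours is roughly 30 years, so the generated certificate
+		// effectively never expires within a plugin's lifetime.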
+ NotAfter: time.Now().Add(262980 * time.Hour),
+ IsCA: true,
+ }
+
+ der, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var certOut bytes.Buffer
+ if err := pem.Encode(&certOut, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil {
+ return nil, nil, err
+ }
+
+ keyBytes, err := x509.MarshalECPrivateKey(key)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var keyOut bytes.Buffer
+ if err := pem.Encode(&keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}); err != nil {
+ return nil, nil, err
+ }
+
+ cert = certOut.Bytes()
+ privateKey = keyOut.Bytes()
+
+ return cert, privateKey, nil
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/mux_broker.go b/vendor/github.com/hashicorp/go-plugin/mux_broker.go
new file mode 100644
index 00000000..4eb1208f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/mux_broker.go
@@ -0,0 +1,207 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+ "encoding/binary"
+ "fmt"
+ "log"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/hashicorp/yamux"
+)
+
+// MuxBroker is responsible for brokering multiplexed connections by unique ID.
+//
+// It is used by plugins to multiplex multiple RPC connections and data
+// streams on top of a single connection between the plugin process and the
+// host process.
+//
+// This allows a plugin to request a channel with a specific ID to connect to
+// or accept a connection from, and the broker handles the details of
+// holding these channels open while they're being negotiated.
+//
+// The Plugin interface has access to these for both Server and Client.
+// The broker can be used by either (optionally) to reserve and connect to
+// new multiplexed streams. This is useful for complex args and return values,
+// or anything else you might need a data stream for.
+type MuxBroker struct {
+ nextId uint32
+ session *yamux.Session
+ streams map[uint32]*muxBrokerPending
+
+ sync.Mutex
+}
+
+type muxBrokerPending struct {
+ ch chan net.Conn
+ doneCh chan struct{}
+}
+
+func newMuxBroker(s *yamux.Session) *MuxBroker {
+ return &MuxBroker{
+ session: s,
+ streams: make(map[uint32]*muxBrokerPending),
+ }
+}
+
+// Accept accepts a connection by ID.
+//
+// This should not be called multiple times with the same ID at one time.
+func (m *MuxBroker) Accept(id uint32) (net.Conn, error) {
+ var c net.Conn
+ p := m.getStream(id)
+ select {
+ case c = <-p.ch:
+ close(p.doneCh)
+ case <-time.After(5 * time.Second):
+ m.Lock()
+ defer m.Unlock()
+ delete(m.streams, id)
+
+ return nil, fmt.Errorf("timeout waiting for accept")
+ }
+
+ // Ack our connection
+ if err := binary.Write(c, binary.LittleEndian, id); err != nil {
+ c.Close()
+ return nil, err
+ }
+
+ return c, nil
+}
+
+// AcceptAndServe is used to accept a specific stream ID and immediately
+// serve an RPC server on that stream ID. This is used to easily serve
+// complex arguments.
+//
+// The served interface is always registered to the "Plugin" name.
+func (m *MuxBroker) AcceptAndServe(id uint32, v interface{}) {
+ conn, err := m.Accept(id)
+ if err != nil {
+ log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err)
+ return
+ }
+
+ serve(conn, "Plugin", v)
+}
+
+// Close closes the connection and all sub-connections.
+func (m *MuxBroker) Close() error {
+ return m.session.Close()
+}
+
+// Dial opens a connection by ID.
+func (m *MuxBroker) Dial(id uint32) (net.Conn, error) {
+ // Open the stream
+ stream, err := m.session.OpenStream()
+ if err != nil {
+ return nil, err
+ }
+
+ // Write the stream ID onto the wire.
+ if err := binary.Write(stream, binary.LittleEndian, id); err != nil {
+ stream.Close()
+ return nil, err
+ }
+
+ // Read the ack that we connected. Then we're off!
+ var ack uint32
+ if err := binary.Read(stream, binary.LittleEndian, &ack); err != nil {
+ stream.Close()
+ return nil, err
+ }
+ if ack != id {
+ stream.Close()
+ return nil, fmt.Errorf("bad ack: %d (expected %d)", ack, id)
+ }
+
+ return stream, nil
+}
+
+// NextId returns a unique ID to use next.
+//
+// It is possible for very long-running plugin hosts to wrap this value,
+// though it would require a very large amount of RPC calls. In practice
+// we've never seen it happen.
+func (m *MuxBroker) NextId() uint32 {
+ return atomic.AddUint32(&m.nextId, 1)
+}
+
+// Run starts the brokering and should be executed in a goroutine, since it
+// blocks forever, or until the session closes.
+//
+// Users of MuxBroker never need to call this. It is called internally by
+// the plugin host/client.
+func (m *MuxBroker) Run() {
+ for {
+ stream, err := m.session.AcceptStream()
+ if err != nil {
+ // Once we receive an error, just exit
+ break
+ }
+
+ // Read the stream ID from the stream
+ var id uint32
+ if err := binary.Read(stream, binary.LittleEndian, &id); err != nil {
+ stream.Close()
+ continue
+ }
+
+ // Initialize the waiter
+ p := m.getStream(id)
+ select {
+ case p.ch <- stream:
+ default:
+ }
+
+ // Wait for a timeout
+ go m.timeoutWait(id, p)
+ }
+}
+
+func (m *MuxBroker) getStream(id uint32) *muxBrokerPending {
+ m.Lock()
+ defer m.Unlock()
+
+ p, ok := m.streams[id]
+ if ok {
+ return p
+ }
+
+ m.streams[id] = &muxBrokerPending{
+ ch: make(chan net.Conn, 1),
+ doneCh: make(chan struct{}),
+ }
+ return m.streams[id]
+}
+
+func (m *MuxBroker) timeoutWait(id uint32, p *muxBrokerPending) {
+ // Wait for the stream to either be picked up and connected, or
+ // for a timeout.
+ timeout := false
+ select {
+ case <-p.doneCh:
+ case <-time.After(5 * time.Second):
+ timeout = true
+ }
+
+ m.Lock()
+ defer m.Unlock()
+
+ // Delete the stream so no one else can grab it
+ delete(m.streams, id)
+
+ // If we timed out, then check if we have a channel in the buffer,
+ // and if so, close it.
+ if timeout {
+		select {
+		case s := <-p.ch:
+			s.Close()
+		default:
+			// No stream was delivered for this ID; nothing to close.
+		}
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go
new file mode 100644
index 00000000..184749b9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/plugin.go
@@ -0,0 +1,61 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// The plugin package exposes functions and helpers for communicating to
+// plugins which are implemented as standalone binary applications.
+//
+// plugin.Client fully manages the lifecycle of executing the application,
+// connecting to it, and returning the RPC client for dispensing plugins.
+//
+// plugin.Serve fully manages listeners to expose an RPC server from a binary
+// that plugin.Client can connect to.
+package plugin
+
+import (
+ "context"
+ "errors"
+ "net/rpc"
+
+ "google.golang.org/grpc"
+)
+
+// Plugin is the interface that is implemented to serve/connect to an
+// interface implementation.
+type Plugin interface {
+ // Server should return the RPC server compatible struct to serve
+ // the methods that the Client calls over net/rpc.
+ Server(*MuxBroker) (interface{}, error)
+
+ // Client returns an interface implementation for the plugin you're
+ // serving that communicates to the server end of the plugin.
+ Client(*MuxBroker, *rpc.Client) (interface{}, error)
+}
+
+// GRPCPlugin is the interface that is implemented to serve/connect to
+// a plugin over gRPC.
+type GRPCPlugin interface {
+ // GRPCServer should register this plugin for serving with the
+ // given GRPCServer. Unlike Plugin.Server, this is only called once
+ // since gRPC plugins serve singletons.
+ GRPCServer(*GRPCBroker, *grpc.Server) error
+
+ // GRPCClient should return the interface implementation for the plugin
+ // you're serving via gRPC. The provided context will be canceled by
+ // go-plugin in the event of the plugin process exiting.
+ GRPCClient(context.Context, *GRPCBroker, *grpc.ClientConn) (interface{}, error)
+}
+
+// NetRPCUnsupportedPlugin implements Plugin but returns errors for the
+// Server and Client functions. This will effectively disable support for
+// net/rpc based plugins.
+//
+// This struct can be embedded in your struct.
+type NetRPCUnsupportedPlugin struct{}
+
+func (p NetRPCUnsupportedPlugin) Server(*MuxBroker) (interface{}, error) {
+ return nil, errors.New("net/rpc plugin protocol not supported")
+}
+
+func (p NetRPCUnsupportedPlugin) Client(*MuxBroker, *rpc.Client) (interface{}, error) {
+ return nil, errors.New("net/rpc plugin protocol not supported")
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/process.go b/vendor/github.com/hashicorp/go-plugin/process.go
new file mode 100644
index 00000000..b8844636
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/process.go
@@ -0,0 +1,4 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
diff --git a/vendor/github.com/hashicorp/go-plugin/protocol.go b/vendor/github.com/hashicorp/go-plugin/protocol.go
new file mode 100644
index 00000000..e4b7be38
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/protocol.go
@@ -0,0 +1,48 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+ "io"
+ "net"
+)
+
+// Protocol is an enum representing the types of protocols.
+type Protocol string
+
+const (
+ ProtocolInvalid Protocol = ""
+ ProtocolNetRPC Protocol = "netrpc"
+ ProtocolGRPC Protocol = "grpc"
+)
+
+// ServerProtocol is an interface that must be implemented for new plugin
+// protocols to be servers.
+type ServerProtocol interface {
+ // Init is called once to configure and initialize the protocol, but
+ // not start listening. This is the point at which all validation should
+ // be done and errors returned.
+ Init() error
+
+ // Config is extra configuration to be outputted to stdout. This will
+ // be automatically base64 encoded to ensure it can be parsed properly.
+ // This can be an empty string if additional configuration is not needed.
+ Config() string
+
+ // Serve is called to serve connections on the given listener. This should
+ // continue until the listener is closed.
+ Serve(net.Listener)
+}
+
+// ClientProtocol is an interface that must be implemented for new plugin
+// protocols to be clients.
+type ClientProtocol interface {
+ io.Closer
+
+ // Dispense dispenses a new instance of the plugin with the given name.
+ Dispense(string) (interface{}, error)
+
+ // Ping checks that the client connection is still healthy.
+ Ping() error
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go
new file mode 100644
index 00000000..142454df
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/rpc_client.go
@@ -0,0 +1,173 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+ "crypto/tls"
+ "fmt"
+ "io"
+ "net"
+ "net/rpc"
+
+ "github.com/hashicorp/yamux"
+)
+
+// RPCClient connects to an RPCServer over net/rpc to dispense plugin types.
+type RPCClient struct {
+ broker *MuxBroker
+ control *rpc.Client
+ plugins map[string]Plugin
+
+ // These are the streams used for the various stdout/err overrides
+ stdout, stderr net.Conn
+}
+
+// newRPCClient creates a new RPCClient. The Client argument is expected
+// to be successfully started already with a lock held.
+func newRPCClient(c *Client) (*RPCClient, error) {
+ // Connect to the client
+ conn, err := net.Dial(c.address.Network(), c.address.String())
+ if err != nil {
+ return nil, err
+ }
+ if tcpConn, ok := conn.(*net.TCPConn); ok {
+ // Make sure to set keep alive so that the connection doesn't die
+ tcpConn.SetKeepAlive(true)
+ }
+
+ if c.config.TLSConfig != nil {
+ conn = tls.Client(conn, c.config.TLSConfig)
+ }
+
+ // Create the actual RPC client
+ result, err := NewRPCClient(conn, c.config.Plugins)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ // Begin the stream syncing so that stdin, out, err work properly
+ err = result.SyncStreams(
+ c.config.SyncStdout,
+ c.config.SyncStderr)
+ if err != nil {
+ result.Close()
+ return nil, err
+ }
+
+ return result, nil
+}
+
+// NewRPCClient creates a client from an already-open connection-like value.
+// Dial is typically used instead.
+func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) {
+ // Create the yamux client so we can multiplex
+ mux, err := yamux.Client(conn, nil)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ // Connect to the control stream.
+ control, err := mux.Open()
+ if err != nil {
+ mux.Close()
+ return nil, err
+ }
+
+ // Connect stdout, stderr streams
+ stdstream := make([]net.Conn, 2)
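+	// Index 0 carries stdout and index 1 carries stderr (wired up in the
+	// struct literal below).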
+	for i := range stdstream {
+ stdstream[i], err = mux.Open()
+ if err != nil {
+ mux.Close()
+ return nil, err
+ }
+ }
+
+ // Create the broker and start it up
+ broker := newMuxBroker(mux)
+ go broker.Run()
+
+ // Build the client using our broker and control channel.
+ return &RPCClient{
+ broker: broker,
+ control: rpc.NewClient(control),
+ plugins: plugins,
+ stdout: stdstream[0],
+ stderr: stdstream[1],
+ }, nil
+}
+
+// SyncStreams should be called to enable syncing of stdout,
+// stderr with the plugin.
+//
+// This will return immediately and the syncing will continue to happen
+// in the background. You do not need to launch this in a goroutine itself.
+//
+// This should never be called multiple times.
+func (c *RPCClient) SyncStreams(stdout io.Writer, stderr io.Writer) error {
+ go copyStream("stdout", stdout, c.stdout)
+ go copyStream("stderr", stderr, c.stderr)
+ return nil
+}
+
+// Close closes the connection. The client is no longer usable after this
+// is called.
+func (c *RPCClient) Close() error {
+ // Call the control channel and ask it to gracefully exit. If this
+ // errors, then we save it so that we always return an error but we
+ // want to try to close the other channels anyways.
+ var empty struct{}
+ returnErr := c.control.Call("Control.Quit", true, &empty)
+
+ // Close the other streams we have
+ if err := c.control.Close(); err != nil {
+ return err
+ }
+ if err := c.stdout.Close(); err != nil {
+ return err
+ }
+ if err := c.stderr.Close(); err != nil {
+ return err
+ }
+ if err := c.broker.Close(); err != nil {
+ return err
+ }
+
+ // Return back the error we got from Control.Quit. This is very important
+ // since we MUST return non-nil error if this fails so that Client.Kill
+ // will properly try a process.Kill.
+ return returnErr
+}
+
+func (c *RPCClient) Dispense(name string) (interface{}, error) {
+ p, ok := c.plugins[name]
+ if !ok {
+ return nil, fmt.Errorf("unknown plugin type: %s", name)
+ }
+
+ var id uint32
+ if err := c.control.Call(
+ "Dispenser.Dispense", name, &id); err != nil {
+ return nil, err
+ }
+
+ conn, err := c.broker.Dial(id)
+ if err != nil {
+ return nil, err
+ }
+
+ return p.Client(c.broker, rpc.NewClient(conn))
+}
+
+// Ping pings the connection to ensure it is still alive.
+//
+// The error from the RPC call is returned exactly if you want to inspect
+// it for further error analysis. Any error returned from here would indicate
+// that the connection to the plugin is not healthy.
+func (c *RPCClient) Ping() error {
+ var empty struct{}
+ return c.control.Call("Control.Ping", true, &empty)
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go
new file mode 100644
index 00000000..cec0a3d9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/rpc_server.go
@@ -0,0 +1,209 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "net/rpc"
+ "sync"
+
+ "github.com/hashicorp/yamux"
+)
+
+// RPCServer listens for network connections and then dispenses interface
+// implementations over net/rpc.
+//
+// Once the fields below are set, they shouldn't be read directly from the
+// structure again, since the server may read and write them concurrently.
+type RPCServer struct {
+ Plugins map[string]Plugin
+
+ // Stdout, Stderr are what this server will use instead of the
+ // normal stdin/out/err. This is because due to the multi-process nature
+ // of our plugin system, we can't use the normal process values so we
+ // make our own custom one we pipe across.
+ Stdout io.Reader
+ Stderr io.Reader
+
+ // DoneCh should be set to a non-nil channel that will be closed
+ // when the control requests the RPC server to end.
+ DoneCh chan<- struct{}
+
+ lock sync.Mutex
+}
+
+// ServerProtocol impl.
+func (s *RPCServer) Init() error { return nil }
+
+// ServerProtocol impl.
+func (s *RPCServer) Config() string { return "" }
+
+// ServerProtocol impl.
+func (s *RPCServer) Serve(lis net.Listener) {
+ defer s.done()
+
+ for {
+ conn, err := lis.Accept()
+ if err != nil {
+ severity := "ERR"
+ if errors.Is(err, net.ErrClosed) {
+ severity = "DEBUG"
+ }
+ log.Printf("[%s] plugin: plugin server: %s", severity, err)
+ return
+ }
+
+ go s.ServeConn(conn)
+ }
+}
+
+// ServeConn runs a single connection.
+//
+// ServeConn blocks, serving the connection until the client hangs up.
+func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) {
+ // First create the yamux server to wrap this connection
+ mux, err := yamux.Server(conn, nil)
+ if err != nil {
+ conn.Close()
+ log.Printf("[ERR] plugin: error creating yamux server: %s", err)
+ return
+ }
+
+ // Accept the control connection
+ control, err := mux.Accept()
+ if err != nil {
+ mux.Close()
+ if err != io.EOF {
+ log.Printf("[ERR] plugin: error accepting control connection: %s", err)
+ }
+
+ return
+ }
+
+ // Connect the stdstreams (in, out, err)
+ stdstream := make([]net.Conn, 2)
+ for i := range stdstream {
+ stdstream[i], err = mux.Accept()
+ if err != nil {
+ mux.Close()
+ log.Printf("[ERR] plugin: accepting stream %d: %s", i, err)
+ return
+ }
+ }
+
+ // Copy std streams out to the proper place
+ go copyStream("stdout", stdstream[0], s.Stdout)
+ go copyStream("stderr", stdstream[1], s.Stderr)
+
+ // Create the broker and start it up
+ broker := newMuxBroker(mux)
+ go broker.Run()
+
+ // Use the control connection to build the dispenser and serve the
+ // connection.
+ server := rpc.NewServer()
+ server.RegisterName("Control", &controlServer{
+ server: s,
+ })
+ server.RegisterName("Dispenser", &dispenseServer{
+ broker: broker,
+ plugins: s.Plugins,
+ })
+ server.ServeConn(control)
+}
+
+// done is called internally by the control server to trigger the
+// doneCh to close which is listened to by the main process to cleanly
+// exit.
+func (s *RPCServer) done() {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ if s.DoneCh != nil {
+ close(s.DoneCh)
+ s.DoneCh = nil
+ }
+}
+
+// controlServer serves the Ping and Quit control RPC endpoints for an RPCServer.
+type controlServer struct {
+ server *RPCServer
+}
+
+// Ping can be called to verify the connection (and likely the binary)
+// is still alive to a plugin.
+func (c *controlServer) Ping(
+ null bool, response *struct{},
+) error {
+ *response = struct{}{}
+ return nil
+}
+
+func (c *controlServer) Quit(
+ null bool, response *struct{},
+) error {
+ // End the server
+ c.server.done()
+
+ // Always return true
+ *response = struct{}{}
+
+ return nil
+}
+
+// dispenseServer dispenses various interface implementations for Terraform.
+type dispenseServer struct {
+ broker *MuxBroker
+ plugins map[string]Plugin
+}
+
+func (d *dispenseServer) Dispense(
+ name string, response *uint32,
+) error {
+ // Find the function to create this implementation
+ p, ok := d.plugins[name]
+ if !ok {
+ return fmt.Errorf("unknown plugin type: %s", name)
+ }
+
+ // Create the implementation first so we know if there is an error.
+ impl, err := p.Server(d.broker)
+ if err != nil {
+ // We turn the error into an errors error so that it works across RPC
+ return errors.New(err.Error())
+ }
+
+ // Reserve an ID for our implementation
+ id := d.broker.NextId()
+ *response = id
+
+ // Run the rest in a goroutine since it can only happen once this RPC
+ // call returns. We wait for a connection for the plugin implementation
+ // and serve it.
+ go func() {
+ conn, err := d.broker.Accept(id)
+ if err != nil {
+ log.Printf("[ERR] go-plugin: plugin dispense error: %s: %s", name, err)
+ return
+ }
+
+ serve(conn, "Plugin", impl)
+ }()
+
+ return nil
+}
+
+func serve(conn io.ReadWriteCloser, name string, v interface{}) {
+ server := rpc.NewServer()
+ if err := server.RegisterName(name, v); err != nil {
+ log.Printf("[ERR] go-plugin: plugin dispense error: %s", err)
+ return
+ }
+
+ server.ServeConn(conn)
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/runner/runner.go b/vendor/github.com/hashicorp/go-plugin/runner/runner.go
new file mode 100644
index 00000000..e638ae5f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/runner/runner.go
@@ -0,0 +1,72 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package runner
+
+import (
+ "context"
+ "io"
+)
+
+// Runner defines the interface required by go-plugin to manage the lifecycle
+// of a plugin and attempt to negotiate a connection with it. Note that this
+// is orthogonal to the protocol and transport used, which are negotiated over stdout.
+type Runner interface {
+ // Start should start the plugin and ensure any work required for servicing
+ // other interface methods is done. If the context is cancelled, it should
+ // only abort any attempts to _start_ the plugin. Waiting and shutdown are
+ // handled separately.
+ Start(ctx context.Context) error
+
+ // Diagnose makes a best-effort attempt to return any debug information that
+ // might help users understand why a plugin failed to start and negotiate a
+ // connection.
+ Diagnose(ctx context.Context) string
+
+ // Stdout is used to negotiate the go-plugin protocol.
+ Stdout() io.ReadCloser
+
+ // Stderr is used for forwarding plugin logs to the host process logger.
+ Stderr() io.ReadCloser
+
+ // Name is a human-friendly name for the plugin, such as the path to the
+ // executable. It does not have to be unique.
+ Name() string
+
+ AttachedRunner
+}
+
+// AttachedRunner defines a limited subset of Runner's interface to represent the
+// reduced responsibility for plugin lifecycle when attaching to an already running
+// plugin.
+type AttachedRunner interface {
+ // Wait should wait until the plugin stops running, whether in response to
+ // an out of band signal or in response to calling Kill().
+ Wait(ctx context.Context) error
+
+ // Kill should stop the plugin and perform any cleanup required.
+ Kill(ctx context.Context) error
+
+	// ID is a unique identifier to represent the running plugin, e.g. a pid or
+	// container ID.
+ ID() string
+
+ AddrTranslator
+}
+
+// AddrTranslator translates addresses between the execution context of the host
+// process and the plugin. For example, if the plugin is in a container, the file
+// path for a Unix socket may be different between the host and the container.
+//
+// It is only intended to be used by the host process.
+type AddrTranslator interface {
+ // Called before connecting on any addresses received back from the plugin.
+ PluginToHost(pluginNet, pluginAddr string) (hostNet string, hostAddr string, err error)
+
+ // Called on any host process addresses before they are sent to the plugin.
+ HostToPlugin(hostNet, hostAddr string) (pluginNet string, pluginAddr string, err error)
+}
+
+// ReattachFunc can be passed to a client's reattach config to reattach to an
+// already running plugin instead of starting it ourselves.
+type ReattachFunc func() (AttachedRunner, error)
diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go
new file mode 100644
index 00000000..e741bc7f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/server.go
@@ -0,0 +1,665 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "os/signal"
+ "os/user"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+
+ hclog "github.com/hashicorp/go-hclog"
+ "github.com/hashicorp/go-plugin/internal/grpcmux"
+ "google.golang.org/grpc"
+)
+
+// CoreProtocolVersion is the ProtocolVersion of the plugin system itself.
+// We will increment this whenever we change any protocol behavior. This
+// will invalidate any prior plugins but will at least allow us to iterate
+// on the core in a safe way. We will do our best to do this very
+// infrequently.
+const CoreProtocolVersion = 1
+
+// HandshakeConfig is the configuration used by client and servers to
+// handshake before starting a plugin connection. This is embedded by
+// both ServeConfig and ClientConfig.
+//
+// In practice, the plugin host creates a HandshakeConfig that is exported
+// and plugins can then easily consume it.
+type HandshakeConfig struct {
+ // ProtocolVersion is the version that clients must match on to
+ // agree they can communicate. This should match the ProtocolVersion
+ // set on ClientConfig when using a plugin.
+ // This field is not required if VersionedPlugins are being used in the
+ // Client or Server configurations.
+ ProtocolVersion uint
+
+ // MagicCookieKey and value are used as a very basic verification
+ // that a plugin is intended to be launched. This is not a security
+ // measure, just a UX feature. If the magic cookie doesn't match,
+ // we show human-friendly output.
+ MagicCookieKey string
+ MagicCookieValue string
+}
+
+// PluginSet is a set of plugins provided to be registered in the plugin
+// server.
+type PluginSet map[string]Plugin
+
+// ServeConfig configures what sorts of plugins are served.
+type ServeConfig struct {
+ // HandshakeConfig is the configuration that must match clients.
+ HandshakeConfig
+
+ // TLSProvider is a function that returns a configured tls.Config.
+ TLSProvider func() (*tls.Config, error)
+
+ // Plugins are the plugins that are served.
+ // The implied version of this PluginSet is the Handshake.ProtocolVersion.
+ Plugins PluginSet
+
+ // VersionedPlugins is a map of PluginSets for specific protocol versions.
+ // These can be used to negotiate a compatible version between client and
+ // server. If this is set, Handshake.ProtocolVersion is not required.
+ VersionedPlugins map[int]PluginSet
+
+ // GRPCServer should be non-nil to enable serving the plugins over
+ // gRPC. This is a function to create the server when needed with the
+ // given server options. The server options populated by go-plugin will
+ // be for TLS if set. You may modify the input slice.
+ //
+ // Note that the grpc.Server will automatically be registered with
+ // the gRPC health checking service. This is not optional since go-plugin
+ // relies on this to implement Ping().
+ GRPCServer func([]grpc.ServerOption) *grpc.Server
+
+ // Logger is used to pass a logger into the server. If none is provided the
+ // server will create a default logger.
+ Logger hclog.Logger
+
+ // Test, if non-nil, will put plugin serving into "test mode". This is
+ // meant to be used as part of `go test` within a plugin's codebase to
+ // launch the plugin in-process and output a ReattachConfig.
+ //
+ // This changes the behavior of the server in a number of ways to
+	// accommodate the expectation of running in-process:
+ //
+ // * The handshake cookie is not validated.
+ // * Stdout/stderr will receive plugin reads and writes
+ // * Connection information will not be sent to stdout
+ //
+ Test *ServeTestConfig
+}
+
+// ServeTestConfig configures plugin serving for test mode. See ServeConfig.Test.
+type ServeTestConfig struct {
+ // Context, if set, will force the plugin serving to end when cancelled.
+ // This is only a test configuration because the non-test configuration
+ // expects to take over the process and therefore end on an interrupt or
+ // kill signal. For tests, we need to kill the plugin serving routinely
+ // and this provides a way to do so.
+ //
+ // If you want to wait for the plugin process to close before moving on,
+ // you can wait on CloseCh.
+ Context context.Context
+
+ // If this channel is non-nil, we will send the ReattachConfig via
+ // this channel. This can be encoded (via JSON recommended) to the
+ // plugin client to attach to this plugin.
+ ReattachConfigCh chan<- *ReattachConfig
+
+ // CloseCh, if non-nil, will be closed when serving exits. This can be
+ // used along with Context to determine when the server is fully shut down.
+ // If this is not set, you can still use Context on its own, but note there
+ // may be a period of time between canceling the context and the plugin
+ // server being shut down.
+ CloseCh chan<- struct{}
+
+ // SyncStdio, if true, will enable the client side "SyncStdout/Stderr"
+ // functionality to work. This defaults to false because the implementation
+ // of making this work within test environments is particularly messy
+ // and SyncStdio functionality is fairly rare, so we default to the simple
+ // scenario.
+ SyncStdio bool
+}
+
+func unixSocketConfigFromEnv() UnixSocketConfig {
+ return UnixSocketConfig{
+ Group: os.Getenv(EnvUnixSocketGroup),
+ socketDir: os.Getenv(EnvUnixSocketDir),
+ }
+}
+
+// protocolVersion determines the protocol version and plugin set to be used by
+// the server. In the event that there is no suitable version, the last version
+// in the config is returned leaving the client to report the incompatibility.
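+//
+// For example (values are illustrative): if the client advertises
+// PLUGIN_PROTOCOL_VERSIONS="4,3" and VersionedPlugins has sets for versions
+// 3 and 5, version 3 and its plugin set are returned.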
+func protocolVersion(opts *ServeConfig) (int, Protocol, PluginSet) {
+ protoVersion := int(opts.ProtocolVersion)
+ pluginSet := opts.Plugins
+ protoType := ProtocolNetRPC
+ // Check if the client sent a list of acceptable versions
+ var clientVersions []int
+ if vs := os.Getenv("PLUGIN_PROTOCOL_VERSIONS"); vs != "" {
+ for _, s := range strings.Split(vs, ",") {
+ v, err := strconv.Atoi(s)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "server sent invalid plugin version %q", s)
+ continue
+ }
+ clientVersions = append(clientVersions, v)
+ }
+ }
+
+ // We want to iterate in reverse order, to ensure we match the newest
+ // compatible plugin version.
+ sort.Sort(sort.Reverse(sort.IntSlice(clientVersions)))
+
+ // set the old un-versioned fields as if they were versioned plugins
+ if opts.VersionedPlugins == nil {
+ opts.VersionedPlugins = make(map[int]PluginSet)
+ }
+
+ if pluginSet != nil {
+ opts.VersionedPlugins[protoVersion] = pluginSet
+ }
+
+ // Sort the version to make sure we match the latest first
+ var versions []int
+ for v := range opts.VersionedPlugins {
+ versions = append(versions, v)
+ }
+
+ sort.Sort(sort.Reverse(sort.IntSlice(versions)))
+
+ // See if we have multiple versions of Plugins to choose from
+ for _, version := range versions {
+ // Record each version, since we guarantee that this returns valid
+ // values even if they are not a protocol match.
+ protoVersion = version
+ pluginSet = opts.VersionedPlugins[version]
+
+ // If we have a configured gRPC server we should select a protocol
+ if opts.GRPCServer != nil {
+ // All plugins in a set must use the same transport, so check the first
+ // for the protocol type
+ for _, p := range pluginSet {
+ switch p.(type) {
+ case GRPCPlugin:
+ protoType = ProtocolGRPC
+ default:
+ protoType = ProtocolNetRPC
+ }
+ break
+ }
+ }
+
+ for _, clientVersion := range clientVersions {
+ if clientVersion == protoVersion {
+ return protoVersion, protoType, pluginSet
+ }
+ }
+ }
+
+ // Return the lowest version as the fallback.
+ // Since we iterated over all the versions in reverse order above, these
+ // values are from the lowest version number plugins (which may be from
+ // a combination of the Handshake.ProtocolVersion and ServeConfig.Plugins
+ // fields). This allows serving the oldest version of our plugins to a
+ // legacy client that did not send a PLUGIN_PROTOCOL_VERSIONS list.
+ return protoVersion, protoType, pluginSet
+}
+
+// Serve serves the plugins given by ServeConfig.
+//
+// Serve doesn't return until the plugin is done being executed. Any
+// fixable errors will be output to os.Stderr and the process will
+// exit with a status code of 1. Serve will panic for unexpected
+// conditions where a user's fix is unknown.
+//
+// This is the method that plugins should call in their main() functions.
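+//
+// As a rough sketch (Handshake and ExamplePlugin are placeholders defined by
+// the plugin author, and this package is imported as "plugin"), a plugin's
+// main function usually looks like:
+//
+//	func main() {
+//		plugin.Serve(&plugin.ServeConfig{
+//			HandshakeConfig: Handshake,
+//			Plugins: plugin.PluginSet{
+//				"example": &ExamplePlugin{},
+//			},
+//		})
+//	}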
+func Serve(opts *ServeConfig) {
+ exitCode := -1
+ // We use this to trigger an `os.Exit` so that we can execute our other
+ // deferred functions. In test mode, we just output the err to stderr
+ // and return.
+ defer func() {
+ if opts.Test == nil && exitCode >= 0 {
+ os.Exit(exitCode)
+ }
+
+ if opts.Test != nil && opts.Test.CloseCh != nil {
+ close(opts.Test.CloseCh)
+ }
+ }()
+
+ if opts.Test == nil {
+ // Validate the handshake config
+ if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" {
+ fmt.Fprintf(os.Stderr,
+ "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+
+ "key or value was set. Please notify the plugin author and report\n"+
+ "this as a bug.\n")
+ exitCode = 1
+ return
+ }
+
+ // First check the cookie
+ if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue {
+ fmt.Fprintf(os.Stderr,
+ "This binary is a plugin. These are not meant to be executed directly.\n"+
+ "Please execute the program that consumes these plugins, which will\n"+
+ "load any plugins automatically\n")
+ exitCode = 1
+ return
+ }
+ }
+
+ // negotiate the version and plugins
+ // start with default version in the handshake config
+ protoVersion, protoType, pluginSet := protocolVersion(opts)
+
+ logger := opts.Logger
+ if logger == nil {
+ // internal logger to os.Stderr
+ logger = hclog.New(&hclog.LoggerOptions{
+ Level: hclog.Trace,
+ Output: os.Stderr,
+ JSONFormat: true,
+ })
+ }
+
+ // Register a listener so we can accept a connection
+ listener, err := serverListener(unixSocketConfigFromEnv())
+ if err != nil {
+ logger.Error("plugin init error", "error", err)
+ return
+ }
+
+ // Close the listener on return. We wrap this in a func() on purpose
+ // because the "listener" reference may change to TLS.
+ defer func() {
+ listener.Close()
+ }()
+
+ var tlsConfig *tls.Config
+ if opts.TLSProvider != nil {
+ tlsConfig, err = opts.TLSProvider()
+ if err != nil {
+ logger.Error("plugin tls init", "error", err)
+ return
+ }
+ }
+
+ var serverCert string
+ clientCert := os.Getenv("PLUGIN_CLIENT_CERT")
+ // If the client is configured using AutoMTLS, the certificate will be here,
+ // and we need to generate our own in response.
+ if tlsConfig == nil && clientCert != "" {
+ logger.Info("configuring server automatic mTLS")
+ clientCertPool := x509.NewCertPool()
+ if !clientCertPool.AppendCertsFromPEM([]byte(clientCert)) {
+ logger.Error("client cert provided but failed to parse", "cert", clientCert)
+ }
+
+ certPEM, keyPEM, err := generateCert()
+ if err != nil {
+ logger.Error("failed to generate server certificate", "error", err)
+ panic(err)
+ }
+
+ cert, err := tls.X509KeyPair(certPEM, keyPEM)
+ if err != nil {
+ logger.Error("failed to parse server certificate", "error", err)
+ panic(err)
+ }
+
+ tlsConfig = &tls.Config{
+ Certificates: []tls.Certificate{cert},
+ ClientAuth: tls.RequireAndVerifyClientCert,
+ ClientCAs: clientCertPool,
+ MinVersion: tls.VersionTLS12,
+ RootCAs: clientCertPool,
+ ServerName: "localhost",
+ }
+
+ // We send back the raw leaf cert data for the client rather than the
+ // PEM, since the protocol can't handle newlines.
+ serverCert = base64.RawStdEncoding.EncodeToString(cert.Certificate[0])
+ }
+
+ // Create the channel to tell us when we're done
+ doneCh := make(chan struct{})
+
+ // Create our new stdout, stderr files. These will override our built-in
+ // stdout/stderr so that it works across the stream boundary.
+ var stdout_r, stderr_r io.Reader
+ stdout_r, stdout_w, err := os.Pipe()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err)
+ os.Exit(1)
+ }
+ stderr_r, stderr_w, err := os.Pipe()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err)
+ os.Exit(1)
+ }
+
+ // If we're in test mode, we tee off the reader and write the data
+ // as-is to our normal Stdout and Stderr so that they continue working
+ // while stdio works. This is because in test mode, we assume we're running
+ // in `go test` or some equivalent and we want output to go to standard
+ // locations.
+ if opts.Test != nil {
+ // TODO(mitchellh): This isn't super ideal because a TeeReader
+ // only works if the reader side is actively read. If we never
+ // connect via a plugin client, the output still gets swallowed.
+ stdout_r = io.TeeReader(stdout_r, os.Stdout)
+ stderr_r = io.TeeReader(stderr_r, os.Stderr)
+ }
+
+ // Build the server type
+ var server ServerProtocol
+ switch protoType {
+ case ProtocolNetRPC:
+ // If we have a TLS configuration then we wrap the listener
+ // ourselves and do it at that level.
+ if tlsConfig != nil {
+ listener = tls.NewListener(listener, tlsConfig)
+ }
+
+ // Create the RPC server to dispense
+ server = &RPCServer{
+ Plugins: pluginSet,
+ Stdout: stdout_r,
+ Stderr: stderr_r,
+ DoneCh: doneCh,
+ }
+
+ case ProtocolGRPC:
+ var muxer *grpcmux.GRPCServerMuxer
+ if multiplex, _ := strconv.ParseBool(os.Getenv(envMultiplexGRPC)); multiplex {
+ muxer = grpcmux.NewGRPCServerMuxer(logger, listener)
+ listener = muxer
+ }
+
+ // Create the gRPC server
+ server = &GRPCServer{
+ Plugins: pluginSet,
+ Server: opts.GRPCServer,
+ TLS: tlsConfig,
+ Stdout: stdout_r,
+ Stderr: stderr_r,
+ DoneCh: doneCh,
+ logger: logger,
+ muxer: muxer,
+ }
+
+ default:
+ panic("unknown server protocol: " + protoType)
+ }
+
+ // Initialize the servers
+ if err := server.Init(); err != nil {
+ logger.Error("protocol init", "error", err)
+ return
+ }
+
+ logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String())
+
+ // Output the address and service name to stdout so that the client can
+ // bring it up. In test mode, we don't do this because clients will
+ // attach via a reattach config.
+ if opts.Test == nil {
+ const grpcBrokerMultiplexingSupported = true
+ protocolLine := fmt.Sprintf("%d|%d|%s|%s|%s|%s",
+ CoreProtocolVersion,
+ protoVersion,
+ listener.Addr().Network(),
+ listener.Addr().String(),
+ protoType,
+ serverCert)
+
+ // Old clients will error with new plugins if we blindly append the
+ // seventh segment for gRPC broker multiplexing support, because old
+ // client code uses strings.SplitN(line, "|", 6), which means a seventh
+ // segment will get appended to the sixth segment as "sixthpart|true".
+ //
+ // If the environment variable is set, we assume the client is new enough
+ // to handle a seventh segment, as it should now use
+ // strings.Split(line, "|") and always handle each segment individually.
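+		//
+		// For illustration only (values are arbitrary), the line looks like
+		// "1|5|unix|/tmp/plugin123|grpc|" without the extra segment, and
+		// "1|5|unix|/tmp/plugin123|grpc||true" with it.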
+ if os.Getenv(envMultiplexGRPC) != "" {
+ protocolLine += fmt.Sprintf("|%v", grpcBrokerMultiplexingSupported)
+ }
+ fmt.Printf("%s\n", protocolLine)
+ os.Stdout.Sync()
+ } else if ch := opts.Test.ReattachConfigCh; ch != nil {
+ // Send back the reattach config that can be used. This isn't
+ // quite ready if they connect immediately but the client should
+ // retry a few times.
+ ch <- &ReattachConfig{
+ Protocol: protoType,
+ ProtocolVersion: protoVersion,
+ Addr: listener.Addr(),
+ Pid: os.Getpid(),
+ Test: true,
+ }
+ }
+
+ // Eat the interrupts. In test mode we disable this so that go test
+ // can be cancelled properly.
+ if opts.Test == nil {
+ ch := make(chan os.Signal, 1)
+ signal.Notify(ch, os.Interrupt)
+ go func() {
+ count := 0
+ for {
+ <-ch
+ count++
+ logger.Trace("plugin received interrupt signal, ignoring", "count", count)
+ }
+ }()
+ }
+
+ // Set our stdout, stderr to the stdio stream that clients can retrieve
+ // using ClientConfig.SyncStdout/err. We only do this for non-test mode
+ // or if the test mode explicitly requests it.
+ //
+ // In test mode, we use a multiwriter so that the data continues going
+ // to the normal stdout/stderr so output can show up in test logs. We
+ // also send to the stdio stream so that clients can continue working
+ // if they depend on that.
+ if opts.Test == nil || opts.Test.SyncStdio {
+ if opts.Test != nil {
+ // In test mode we need to maintain the original values so we can
+ // reset it.
+ defer func(out, err *os.File) {
+ os.Stdout = out
+ os.Stderr = err
+ }(os.Stdout, os.Stderr)
+ }
+ os.Stdout = stdout_w
+ os.Stderr = stderr_w
+ }
+
+ // Accept connections and wait for completion
+ go server.Serve(listener)
+
+ ctx := context.Background()
+ if opts.Test != nil && opts.Test.Context != nil {
+ ctx = opts.Test.Context
+ }
+ select {
+ case <-ctx.Done():
+ // Cancellation. We can stop the server by closing the listener.
+ // This isn't graceful at all but this is currently only used by
+ // tests and its our only way to stop.
+ listener.Close()
+
+ // If this is a grpc server, then we also ask the server itself to
+ // end which will kill all connections. There isn't an easy way to do
+ // this for net/rpc currently but net/rpc is more and more unused.
+ if s, ok := server.(*GRPCServer); ok {
+ s.Stop()
+ }
+
+ // Wait for the server itself to shut down
+ <-doneCh
+
+ case <-doneCh:
+ // Note that given the documentation of Serve we should probably be
+ // setting exitCode = 0 and using os.Exit here. That's how it used to
+ // work before extracting this library. However, for years we've done
+ // this so we'll keep this functionality.
+ }
+}
+
+func serverListener(unixSocketCfg UnixSocketConfig) (net.Listener, error) {
+ if runtime.GOOS == "windows" {
+ return serverListener_tcp()
+ }
+
+ return serverListener_unix(unixSocketCfg)
+}
+
+func serverListener_tcp() (net.Listener, error) {
+ envMinPort := os.Getenv("PLUGIN_MIN_PORT")
+ envMaxPort := os.Getenv("PLUGIN_MAX_PORT")
+
+ var minPort, maxPort int64
+ var err error
+
+ switch {
+ case len(envMinPort) == 0:
+ minPort = 0
+ default:
+ minPort, err = strconv.ParseInt(envMinPort, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("Couldn't get value from PLUGIN_MIN_PORT: %v", err)
+ }
+ }
+
+ switch {
+ case len(envMaxPort) == 0:
+ maxPort = 0
+ default:
+ maxPort, err = strconv.ParseInt(envMaxPort, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("Couldn't get value from PLUGIN_MAX_PORT: %v", err)
+ }
+ }
+
+ if minPort > maxPort {
+ return nil, fmt.Errorf("PLUGIN_MIN_PORT value of %d is greater than PLUGIN_MAX_PORT value of %d", minPort, maxPort)
+ }
+
+ for port := minPort; port <= maxPort; port++ {
+ address := fmt.Sprintf("127.0.0.1:%d", port)
+ listener, err := net.Listen("tcp", address)
+ if err == nil {
+ return listener, nil
+ }
+ }
+
+ return nil, errors.New("Couldn't bind plugin TCP listener")
+}
+
+func serverListener_unix(unixSocketCfg UnixSocketConfig) (net.Listener, error) {
+ tf, err := os.CreateTemp(unixSocketCfg.socketDir, "plugin")
+ if err != nil {
+ return nil, err
+ }
+ path := tf.Name()
+
+ // Close the file and remove it because it has to not exist for
+ // the domain socket.
+ if err := tf.Close(); err != nil {
+ return nil, err
+ }
+ if err := os.Remove(path); err != nil {
+ return nil, err
+ }
+
+ l, err := net.Listen("unix", path)
+ if err != nil {
+ return nil, err
+ }
+
+ // By default, unix sockets are only writable by the owner. Set up a custom
+ // group owner and group write permissions if configured.
+ if unixSocketCfg.Group != "" {
+ err = setGroupWritable(path, unixSocketCfg.Group, 0o660)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Wrap the listener in rmListener so that the Unix domain socket file
+ // is removed on close.
+ return newDeleteFileListener(l, path), nil
+}
+
+func setGroupWritable(path, groupString string, mode os.FileMode) error {
+ groupID, err := strconv.Atoi(groupString)
+ if err != nil {
+ group, err := user.LookupGroup(groupString)
+ if err != nil {
+ return fmt.Errorf("failed to find gid from %q: %w", groupString, err)
+ }
+ groupID, err = strconv.Atoi(group.Gid)
+ if err != nil {
+ return fmt.Errorf("failed to parse %q group's gid as an integer: %w", groupString, err)
+ }
+ }
+
+ err = os.Chown(path, -1, groupID)
+ if err != nil {
+ return err
+ }
+
+ err = os.Chmod(path, mode)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// rmListener is an implementation of net.Listener that forwards most
+// calls to the listener but also calls an additional close function. We
+// use this to cleanup the unix domain socket on close, as well as clean
+// up multiplexed listeners.
+type rmListener struct {
+ net.Listener
+ close func() error
+}
+
+func newDeleteFileListener(ln net.Listener, path string) *rmListener {
+ return &rmListener{
+ Listener: ln,
+ close: func() error {
+ return os.Remove(path)
+ },
+ }
+}
+
+func (l *rmListener) Close() error {
+ // Close the listener itself
+ if err := l.Listener.Close(); err != nil {
+ return err
+ }
+
+ // Remove the file
+ return l.close()
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/server_mux.go b/vendor/github.com/hashicorp/go-plugin/server_mux.go
new file mode 100644
index 00000000..6b14b0c2
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/server_mux.go
@@ -0,0 +1,34 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+ "fmt"
+ "os"
+)
+
+// ServeMuxMap is the type that is used to configure ServeMux
+type ServeMuxMap map[string]*ServeConfig
+
+// ServeMux is like Serve, but serves multiple types of plugins determined
+// by the argument given on the command-line.
+//
+// This command doesn't return until the plugin is done being executed. Any
+// errors are logged or output to stderr.
+func ServeMux(m ServeMuxMap) {
+ if len(os.Args) != 2 {
+ fmt.Fprintf(os.Stderr,
+ "Invoked improperly. This is an internal command that shouldn't\n"+
+ "be manually invoked.\n")
+ os.Exit(1)
+ }
+
+ opts, ok := m[os.Args[1]]
+ if !ok {
+ fmt.Fprintf(os.Stderr, "Unknown plugin: %s\n", os.Args[1])
+ os.Exit(1)
+ }
+
+ Serve(opts)
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/stream.go b/vendor/github.com/hashicorp/go-plugin/stream.go
new file mode 100644
index 00000000..a2348642
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/stream.go
@@ -0,0 +1,21 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+ "io"
+ "log"
+)
+
+func copyStream(name string, dst io.Writer, src io.Reader) {
+ if src == nil {
+ panic(name + ": src is nil")
+ }
+ if dst == nil {
+ panic(name + ": dst is nil")
+ }
+ if _, err := io.Copy(dst, src); err != nil && err != io.EOF {
+ log.Printf("[ERR] plugin: stream copy '%s' error: %s", name, err)
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go
new file mode 100644
index 00000000..a8735dfc
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/testing.go
@@ -0,0 +1,185 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+ "bytes"
+ "context"
+ "io"
+ "net"
+ "net/rpc"
+
+ hclog "github.com/hashicorp/go-hclog"
+ "github.com/hashicorp/go-plugin/internal/grpcmux"
+ "github.com/mitchellh/go-testing-interface"
+ "google.golang.org/grpc"
+)
+
+// TestOptions allows specifying options that can affect the behavior of the
+// test functions
+type TestOptions struct {
+	// ServerStdout causes the given value to be used in place of a blank buffer
+	// for RPCServer's Stdout
+	ServerStdout io.ReadCloser
+
+	// ServerStderr causes the given value to be used in place of a blank buffer
+	// for RPCServer's Stderr
+	ServerStderr io.ReadCloser
+}
+
+// The testing file contains test helpers that you can use outside of
+// this package for making it easier to test plugins themselves.
+
+// TestConn is a helper function for returning a client and server
+// net.Conn connected to each other.
+func TestConn(t testing.T) (net.Conn, net.Conn) {
+ // Listen to any local port. This listener will be closed
+ // after a single connection is established.
+ l, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Start a goroutine to accept our client connection
+ var serverConn net.Conn
+ doneCh := make(chan struct{})
+ go func() {
+ defer close(doneCh)
+ defer l.Close()
+ var err error
+ serverConn, err = l.Accept()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ }()
+
+ // Connect to the server
+ clientConn, err := net.Dial("tcp", l.Addr().String())
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Wait for the server side to acknowledge it has connected
+ <-doneCh
+
+ return clientConn, serverConn
+}
+
+// TestRPCConn returns a rpc client and server connected to each other.
+func TestRPCConn(t testing.T) (*rpc.Client, *rpc.Server) {
+ clientConn, serverConn := TestConn(t)
+
+ server := rpc.NewServer()
+ go server.ServeConn(serverConn)
+
+ client := rpc.NewClient(clientConn)
+ return client, server
+}
+
+// TestPluginRPCConn returns a plugin RPC client and server that are connected
+// together and configured.
+func TestPluginRPCConn(t testing.T, ps map[string]Plugin, opts *TestOptions) (*RPCClient, *RPCServer) {
+ // Create two net.Conns we can use to shuttle our control connection
+ clientConn, serverConn := TestConn(t)
+
+ // Start up the server
+ server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)}
+ if opts != nil {
+ if opts.ServerStdout != nil {
+ server.Stdout = opts.ServerStdout
+ }
+ if opts.ServerStderr != nil {
+ server.Stderr = opts.ServerStderr
+ }
+ }
+ go server.ServeConn(serverConn)
+
+ // Connect the client to the server
+ client, err := NewRPCClient(clientConn, ps)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ return client, server
+}
+
+// TestGRPCConn returns a gRPC client conn and grpc server that are connected
+// together and configured. The register function is used to register services
+// prior to the Serve call. This is used to test gRPC connections.
+func TestGRPCConn(t testing.T, register func(*grpc.Server)) (*grpc.ClientConn, *grpc.Server) {
+ // Create a listener
+ l, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ server := grpc.NewServer()
+ register(server)
+ go server.Serve(l)
+
+ // Connect to the server
+ conn, err := grpc.Dial(
+ l.Addr().String(),
+ grpc.WithBlock(),
+ grpc.WithInsecure())
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Connection successful, close the listener
+ l.Close()
+
+ return conn, server
+}
+
+// TestPluginGRPCConn returns a plugin gRPC client and server that are connected
+// together and configured. This is used to test gRPC connections.
+func TestPluginGRPCConn(t testing.T, multiplex bool, ps map[string]Plugin) (*GRPCClient, *GRPCServer) {
+ // Create a listener
+ ln, err := serverListener(UnixSocketConfig{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ logger := hclog.New(&hclog.LoggerOptions{
+ Level: hclog.Debug,
+ })
+
+ // Start up the server
+ var muxer *grpcmux.GRPCServerMuxer
+ if multiplex {
+ muxer = grpcmux.NewGRPCServerMuxer(logger, ln)
+ ln = muxer
+ }
+ server := &GRPCServer{
+ Plugins: ps,
+ DoneCh: make(chan struct{}),
+ Server: DefaultGRPCServer,
+ Stdout: new(bytes.Buffer),
+ Stderr: new(bytes.Buffer),
+ logger: logger,
+ muxer: muxer,
+ }
+ if err := server.Init(); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ go server.Serve(ln)
+
+ client := &Client{
+ address: ln.Addr(),
+ protocol: ProtocolGRPC,
+ config: &ClientConfig{
+ Plugins: ps,
+ GRPCBrokerMultiplex: multiplex,
+ },
+ logger: logger,
+ }
+
+ grpcClient, err := newGRPCClient(context.Background(), client)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return grpcClient, server
+}
diff --git a/vendor/github.com/hashicorp/yamux/.gitignore b/vendor/github.com/hashicorp/yamux/.gitignore
new file mode 100644
index 00000000..83656241
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/.gitignore
@@ -0,0 +1,23 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
diff --git a/vendor/github.com/hashicorp/yamux/LICENSE b/vendor/github.com/hashicorp/yamux/LICENSE
new file mode 100644
index 00000000..f0e5c79e
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/LICENSE
@@ -0,0 +1,362 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/yamux/README.md b/vendor/github.com/hashicorp/yamux/README.md
new file mode 100644
index 00000000..d4db7fc9
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/README.md
@@ -0,0 +1,86 @@
+# Yamux
+
+Yamux (Yet another Multiplexer) is a multiplexing library for Golang.
+It relies on an underlying connection to provide reliability
+and ordering, such as TCP or Unix domain sockets, and provides
+stream-oriented multiplexing. It is inspired by SPDY but is not
+interoperable with it.
+
+Yamux features include:
+
+* Bi-directional streams
+ * Streams can be opened by either client or server
+ * Useful for NAT traversal
+ * Server-side push support
+* Flow control
+ * Avoid starvation
+ * Back-pressure to prevent overwhelming a receiver
+* Keep Alives
+ * Enables persistent connections over a load balancer
+* Efficient
+ * Enables thousands of logical streams with low overhead
+
+## Documentation
+
+For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/yamux).
+
+## Specification
+
+The full specification for Yamux is provided in the `spec.md` file.
+It can be used as a guide to implementors of interoperable libraries.
+
+## Usage
+
+Using Yamux is remarkably simple:
+
+```go
+
+func client() {
+ // Get a TCP connection
+ conn, err := net.Dial(...)
+ if err != nil {
+ panic(err)
+ }
+
+ // Setup client side of yamux
+ session, err := yamux.Client(conn, nil)
+ if err != nil {
+ panic(err)
+ }
+
+ // Open a new stream
+ stream, err := session.Open()
+ if err != nil {
+ panic(err)
+ }
+
+ // Stream implements net.Conn
+ stream.Write([]byte("ping"))
+}
+
+func server() {
+ // Accept a TCP connection
+ conn, err := listener.Accept()
+ if err != nil {
+ panic(err)
+ }
+
+ // Setup server side of yamux
+ session, err := yamux.Server(conn, nil)
+ if err != nil {
+ panic(err)
+ }
+
+ // Accept a stream
+ stream, err := session.Accept()
+ if err != nil {
+ panic(err)
+ }
+
+ // Listen for a message
+ buf := make([]byte, 4)
+ stream.Read(buf)
+}
+
+```
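+
+A server typically accepts streams in a loop and handles each one in its own
+goroutine. A minimal sketch (the echo handler is just a placeholder):
+
+```go
+func serveSession(session *yamux.Session) {
+	for {
+		// Accept blocks until the remote side opens another stream
+		stream, err := session.Accept()
+		if err != nil {
+			return // session was closed
+		}
+
+		go func(c net.Conn) {
+			defer c.Close()
+			io.Copy(c, c) // echo; replace with a real handler
+		}(stream)
+	}
+}
+```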
+
diff --git a/vendor/github.com/hashicorp/yamux/addr.go b/vendor/github.com/hashicorp/yamux/addr.go
new file mode 100644
index 00000000..f6a00199
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/addr.go
@@ -0,0 +1,60 @@
+package yamux
+
+import (
+ "fmt"
+ "net"
+)
+
+// hasAddr is used to get the address from the underlying connection
+type hasAddr interface {
+ LocalAddr() net.Addr
+ RemoteAddr() net.Addr
+}
+
+// yamuxAddr is used when we cannot get the underlying address
+type yamuxAddr struct {
+ Addr string
+}
+
+func (*yamuxAddr) Network() string {
+ return "yamux"
+}
+
+func (y *yamuxAddr) String() string {
+ return fmt.Sprintf("yamux:%s", y.Addr)
+}
+
+// Addr is used to get the address of the listener.
+func (s *Session) Addr() net.Addr {
+ return s.LocalAddr()
+}
+
+// LocalAddr is used to get the local address of the
+// underlying connection.
+func (s *Session) LocalAddr() net.Addr {
+ addr, ok := s.conn.(hasAddr)
+ if !ok {
+ return &yamuxAddr{"local"}
+ }
+ return addr.LocalAddr()
+}
+
+// RemoteAddr is used to get the address of remote end
+// of the underlying connection
+func (s *Session) RemoteAddr() net.Addr {
+ addr, ok := s.conn.(hasAddr)
+ if !ok {
+ return &yamuxAddr{"remote"}
+ }
+ return addr.RemoteAddr()
+}
+
+// LocalAddr returns the local address
+func (s *Stream) LocalAddr() net.Addr {
+ return s.session.LocalAddr()
+}
+
+// RemoteAddr returns the remote address
+func (s *Stream) RemoteAddr() net.Addr {
+ return s.session.RemoteAddr()
+}
diff --git a/vendor/github.com/hashicorp/yamux/const.go b/vendor/github.com/hashicorp/yamux/const.go
new file mode 100644
index 00000000..2fdbf844
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/const.go
@@ -0,0 +1,182 @@
+package yamux
+
+import (
+ "encoding/binary"
+ "fmt"
+)
+
+// NetError implements net.Error
+type NetError struct {
+ err error
+ timeout bool
+ temporary bool
+}
+
+func (e *NetError) Error() string {
+ return e.err.Error()
+}
+
+func (e *NetError) Timeout() bool {
+ return e.timeout
+}
+
+func (e *NetError) Temporary() bool {
+ return e.temporary
+}
+
+var (
+ // ErrInvalidVersion means we received a frame with an
+ // invalid version
+ ErrInvalidVersion = fmt.Errorf("invalid protocol version")
+
+ // ErrInvalidMsgType means we received a frame with an
+ // invalid message type
+ ErrInvalidMsgType = fmt.Errorf("invalid msg type")
+
+ // ErrSessionShutdown is used if there is a shutdown during
+ // an operation
+ ErrSessionShutdown = fmt.Errorf("session shutdown")
+
+ // ErrStreamsExhausted is returned if we have no more
+ // stream ids to issue
+ ErrStreamsExhausted = fmt.Errorf("streams exhausted")
+
+ // ErrDuplicateStream is used if a duplicate stream is
+ // opened inbound
+ ErrDuplicateStream = fmt.Errorf("duplicate stream initiated")
+
+	// ErrRecvWindowExceeded indicates the window was exceeded
+ ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded")
+
+ // ErrTimeout is used when we reach an IO deadline
+ ErrTimeout = &NetError{
+ err: fmt.Errorf("i/o deadline reached"),
+
+		// Error should meet the net.Error interface for timeouts for compatibility
+ // with standard library expectations, such as http servers.
+ timeout: true,
+ }
+
+ // ErrStreamClosed is returned when using a closed stream
+ ErrStreamClosed = fmt.Errorf("stream closed")
+
+ // ErrUnexpectedFlag is set when we get an unexpected flag
+ ErrUnexpectedFlag = fmt.Errorf("unexpected flag")
+
+ // ErrRemoteGoAway is used when we get a go away from the other side
+ ErrRemoteGoAway = fmt.Errorf("remote end is not accepting connections")
+
+ // ErrConnectionReset is sent if a stream is reset. This can happen
+ // if the backlog is exceeded, or if there was a remote GoAway.
+ ErrConnectionReset = fmt.Errorf("connection reset")
+
+ // ErrConnectionWriteTimeout indicates that we hit the "safety valve"
+ // timeout writing to the underlying stream connection.
+ ErrConnectionWriteTimeout = fmt.Errorf("connection write timeout")
+
+	// ErrKeepAliveTimeout is sent if a missed keepalive caused the stream to close
+ ErrKeepAliveTimeout = fmt.Errorf("keepalive timeout")
+)
+
+const (
+ // protoVersion is the only version we support
+ protoVersion uint8 = 0
+)
+
+const (
+ // Data is used for data frames. They are followed
+ // by length bytes worth of payload.
+ typeData uint8 = iota
+
+ // WindowUpdate is used to change the window of
+ // a given stream. The length indicates the delta
+ // update to the window.
+ typeWindowUpdate
+
+ // Ping is sent as a keep-alive or to measure
+ // the RTT. The StreamID and Length value are echoed
+ // back in the response.
+ typePing
+
+ // GoAway is sent to terminate a session. The StreamID
+ // should be 0 and the length is an error code.
+ typeGoAway
+)
+
+const (
+ // SYN is sent to signal a new stream. May
+ // be sent with a data payload
+ flagSYN uint16 = 1 << iota
+
+ // ACK is sent to acknowledge a new stream. May
+ // be sent with a data payload
+ flagACK
+
+ // FIN is sent to half-close the given stream.
+ // May be sent with a data payload.
+ flagFIN
+
+ // RST is used to hard close a given stream.
+ flagRST
+)
+
+const (
+ // initialStreamWindow is the initial stream window size
+ initialStreamWindow uint32 = 256 * 1024
+)
+
+const (
+ // goAwayNormal is sent on a normal termination
+ goAwayNormal uint32 = iota
+
+ // goAwayProtoErr sent on a protocol error
+ goAwayProtoErr
+
+ // goAwayInternalErr sent on an internal error
+ goAwayInternalErr
+)
+
+const (
+ sizeOfVersion = 1
+ sizeOfType = 1
+ sizeOfFlags = 2
+ sizeOfStreamID = 4
+ sizeOfLength = 4
+ headerSize = sizeOfVersion + sizeOfType + sizeOfFlags +
+ sizeOfStreamID + sizeOfLength
+)
+
+type header []byte
+
+func (h header) Version() uint8 {
+ return h[0]
+}
+
+func (h header) MsgType() uint8 {
+ return h[1]
+}
+
+func (h header) Flags() uint16 {
+ return binary.BigEndian.Uint16(h[2:4])
+}
+
+func (h header) StreamID() uint32 {
+ return binary.BigEndian.Uint32(h[4:8])
+}
+
+func (h header) Length() uint32 {
+ return binary.BigEndian.Uint32(h[8:12])
+}
+
+func (h header) String() string {
+ return fmt.Sprintf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d",
+ h.Version(), h.MsgType(), h.Flags(), h.StreamID(), h.Length())
+}
+
+func (h header) encode(msgType uint8, flags uint16, streamID uint32, length uint32) {
+ h[0] = protoVersion
+ h[1] = msgType
+ binary.BigEndian.PutUint16(h[2:4], flags)
+ binary.BigEndian.PutUint32(h[4:8], streamID)
+ binary.BigEndian.PutUint32(h[8:12], length)
+}
diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go
new file mode 100644
index 00000000..0c3e67b0
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/mux.go
@@ -0,0 +1,114 @@
+package yamux
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "time"
+)
+
+// Config is used to tune the Yamux session
+type Config struct {
+ // AcceptBacklog is used to limit how many streams may be
+ // waiting an accept.
+ AcceptBacklog int
+
+	// EnableKeepAlive is used to enable periodic keep-alive
+	// messages using a ping.
+ EnableKeepAlive bool
+
+ // KeepAliveInterval is how often to perform the keep alive
+ KeepAliveInterval time.Duration
+
+	// ConnectionWriteTimeout is meant to be a "safety valve" timeout after
+	// which we will suspect a problem with the underlying connection and
+	// close it. This is only applied to writes, where there's generally
+	// an expectation that things will move along quickly.
+ ConnectionWriteTimeout time.Duration
+
+ // MaxStreamWindowSize is used to control the maximum
+ // window size that we allow for a stream.
+ MaxStreamWindowSize uint32
+
+ // StreamOpenTimeout is the maximum amount of time that a stream will
+ // be allowed to remain in pending state while waiting for an ack from the peer.
+ // Once the timeout is reached the session will be gracefully closed.
+ // A zero value disables the StreamOpenTimeout allowing unbounded
+ // blocking on OpenStream calls.
+ StreamOpenTimeout time.Duration
+
+	// StreamCloseTimeout is the maximum time that a stream will be allowed to
+ // be in a half-closed state when `Close` is called before forcibly
+ // closing the connection. Forcibly closed connections will empty the
+ // receive buffer, drop any future packets received for that stream,
+ // and send a RST to the remote side.
+ StreamCloseTimeout time.Duration
+
+ // LogOutput is used to control the log destination. Either Logger or
+ // LogOutput can be set, not both.
+ LogOutput io.Writer
+
+ // Logger is used to pass in the logger to be used. Either Logger or
+ // LogOutput can be set, not both.
+ Logger *log.Logger
+}
+
+// DefaultConfig is used to return a default configuration
+func DefaultConfig() *Config {
+ return &Config{
+ AcceptBacklog: 256,
+ EnableKeepAlive: true,
+ KeepAliveInterval: 30 * time.Second,
+ ConnectionWriteTimeout: 10 * time.Second,
+ MaxStreamWindowSize: initialStreamWindow,
+ StreamCloseTimeout: 5 * time.Minute,
+ StreamOpenTimeout: 75 * time.Second,
+ LogOutput: os.Stderr,
+ }
+}
+
+// VerifyConfig is used to verify the sanity of configuration
+func VerifyConfig(config *Config) error {
+ if config.AcceptBacklog <= 0 {
+ return fmt.Errorf("backlog must be positive")
+ }
+ if config.KeepAliveInterval == 0 {
+ return fmt.Errorf("keep-alive interval must be positive")
+ }
+ if config.MaxStreamWindowSize < initialStreamWindow {
+ return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow)
+ }
+ if config.LogOutput != nil && config.Logger != nil {
+ return fmt.Errorf("both Logger and LogOutput may not be set, select one")
+ } else if config.LogOutput == nil && config.Logger == nil {
+ return fmt.Errorf("one of Logger or LogOutput must be set, select one")
+ }
+ return nil
+}
+
+// Server is used to initialize a new server-side connection.
+// There must be at most one server-side connection. If a nil config is
+// provided, the DefaultConfig will be used.
+func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) {
+ if config == nil {
+ config = DefaultConfig()
+ }
+ if err := VerifyConfig(config); err != nil {
+ return nil, err
+ }
+ return newSession(config, conn, false), nil
+}
+
+// Client is used to initialize a new client-side connection.
+// There must be at most one client-side connection.
+func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) {
+ if config == nil {
+ config = DefaultConfig()
+ }
+
+ if err := VerifyConfig(config); err != nil {
+ return nil, err
+ }
+ return newSession(config, conn, true), nil
+}
diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go
new file mode 100644
index 00000000..38fe3ed1
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/session.go
@@ -0,0 +1,732 @@
+package yamux
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "math"
+ "net"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// Session is used to wrap a reliable ordered connection and to
+// multiplex it into multiple streams.
+type Session struct {
+ // remoteGoAway indicates the remote side does
+	// not want further connections. Must be first for alignment.
+ remoteGoAway int32
+
+ // localGoAway indicates that we should stop
+	// accepting further connections. Must be first for alignment.
+ localGoAway int32
+
+ // nextStreamID is the next stream we should
+	// send. This depends on whether we are a client or a server.
+ nextStreamID uint32
+
+ // config holds our configuration
+ config *Config
+
+ // logger is used for our logs
+ logger *log.Logger
+
+ // conn is the underlying connection
+ conn io.ReadWriteCloser
+
+ // bufRead is a buffered reader
+ bufRead *bufio.Reader
+
+ // pings is used to track inflight pings
+ pings map[uint32]chan struct{}
+ pingID uint32
+ pingLock sync.Mutex
+
+ // streams maps a stream id to a stream, and inflight has an entry
+ // for any outgoing stream that has not yet been established. Both are
+ // protected by streamLock.
+ streams map[uint32]*Stream
+ inflight map[uint32]struct{}
+ streamLock sync.Mutex
+
+ // synCh acts like a semaphore. It is sized to the AcceptBacklog which
+ // is assumed to be symmetric between the client and server. This allows
+ // the client to avoid exceeding the backlog and instead blocks the open.
+ synCh chan struct{}
+
+ // acceptCh is used to pass ready streams to the client
+ acceptCh chan *Stream
+
+ // sendCh is used to mark a stream as ready to send,
+ // or to send a header out directly.
+ sendCh chan *sendReady
+
+ // recvDoneCh is closed when recv() exits to avoid a race
+ // between stream registration and stream shutdown
+ recvDoneCh chan struct{}
+ sendDoneCh chan struct{}
+
+ // shutdown is used to safely close a session
+ shutdown bool
+ shutdownErr error
+ shutdownCh chan struct{}
+ shutdownLock sync.Mutex
+ shutdownErrLock sync.Mutex
+}
+
+// sendReady is used to either mark a stream as ready
+// or to directly send a header
+type sendReady struct {
+ Hdr []byte
+ mu sync.Mutex // Protects Body from unsafe reads.
+ Body []byte
+ Err chan error
+}
+
+// newSession is used to construct a new session
+func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session {
+ logger := config.Logger
+ if logger == nil {
+ logger = log.New(config.LogOutput, "", log.LstdFlags)
+ }
+
+ s := &Session{
+ config: config,
+ logger: logger,
+ conn: conn,
+ bufRead: bufio.NewReader(conn),
+ pings: make(map[uint32]chan struct{}),
+ streams: make(map[uint32]*Stream),
+ inflight: make(map[uint32]struct{}),
+ synCh: make(chan struct{}, config.AcceptBacklog),
+ acceptCh: make(chan *Stream, config.AcceptBacklog),
+ sendCh: make(chan *sendReady, 64),
+ recvDoneCh: make(chan struct{}),
+ sendDoneCh: make(chan struct{}),
+ shutdownCh: make(chan struct{}),
+ }
+ if client {
+ s.nextStreamID = 1
+ } else {
+ s.nextStreamID = 2
+ }
+ go s.recv()
+ go s.send()
+ if config.EnableKeepAlive {
+ go s.keepalive()
+ }
+ return s
+}
+
+// IsClosed does a safe check to see if we have shutdown
+func (s *Session) IsClosed() bool {
+ select {
+ case <-s.shutdownCh:
+ return true
+ default:
+ return false
+ }
+}
+
+// CloseChan returns a read-only channel which is closed as
+// soon as the session is closed.
+func (s *Session) CloseChan() <-chan struct{} {
+ return s.shutdownCh
+}
+
+// NumStreams returns the number of currently open streams
+func (s *Session) NumStreams() int {
+ s.streamLock.Lock()
+ num := len(s.streams)
+ s.streamLock.Unlock()
+ return num
+}
+
+// Open is used to create a new stream as a net.Conn
+func (s *Session) Open() (net.Conn, error) {
+ conn, err := s.OpenStream()
+ if err != nil {
+ return nil, err
+ }
+ return conn, nil
+}
+
+// OpenStream is used to create a new stream
+func (s *Session) OpenStream() (*Stream, error) {
+ if s.IsClosed() {
+ return nil, ErrSessionShutdown
+ }
+ if atomic.LoadInt32(&s.remoteGoAway) == 1 {
+ return nil, ErrRemoteGoAway
+ }
+
+ // Block if we have too many inflight SYNs
+ select {
+ case s.synCh <- struct{}{}:
+ case <-s.shutdownCh:
+ return nil, ErrSessionShutdown
+ }
+
+GET_ID:
+ // Get an ID, and check for stream exhaustion
+ id := atomic.LoadUint32(&s.nextStreamID)
+ if id >= math.MaxUint32-1 {
+ return nil, ErrStreamsExhausted
+ }
+ if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) {
+ goto GET_ID
+ }
+
+ // Register the stream
+ stream := newStream(s, id, streamInit)
+ s.streamLock.Lock()
+ s.streams[id] = stream
+ s.inflight[id] = struct{}{}
+ s.streamLock.Unlock()
+
+ if s.config.StreamOpenTimeout > 0 {
+ go s.setOpenTimeout(stream)
+ }
+
+ // Send the window update to create
+ if err := stream.sendWindowUpdate(); err != nil {
+ select {
+ case <-s.synCh:
+ default:
+ s.logger.Printf("[ERR] yamux: aborted stream open without inflight syn semaphore")
+ }
+ return nil, err
+ }
+ return stream, nil
+}
+
+// setOpenTimeout implements a timeout for streams that are opened but not established.
+// If the StreamOpenTimeout is exceeded we assume the peer is unable to ACK,
+// and close the session.
+// The number of running timers is bounded by the capacity of the synCh.
+func (s *Session) setOpenTimeout(stream *Stream) {
+ timer := time.NewTimer(s.config.StreamOpenTimeout)
+ defer timer.Stop()
+
+ select {
+ case <-stream.establishCh:
+ return
+ case <-s.shutdownCh:
+ return
+ case <-timer.C:
+ // Timeout reached while waiting for ACK.
+ // Close the session to force connection re-establishment.
+ s.logger.Printf("[ERR] yamux: aborted stream open (destination=%s): %v", s.RemoteAddr().String(), ErrTimeout.err)
+ s.Close()
+ }
+}
+
+// Accept is used to block until the next available stream
+// is ready to be accepted.
+func (s *Session) Accept() (net.Conn, error) {
+ conn, err := s.AcceptStream()
+ if err != nil {
+ return nil, err
+ }
+ return conn, err
+}
+
+// AcceptStream is used to block until the next available stream
+// is ready to be accepted.
+func (s *Session) AcceptStream() (*Stream, error) {
+ select {
+ case stream := <-s.acceptCh:
+ if err := stream.sendWindowUpdate(); err != nil {
+ return nil, err
+ }
+ return stream, nil
+ case <-s.shutdownCh:
+ return nil, s.shutdownErr
+ }
+}
+
+// Close is used to close the session and all streams.
+// Attempts to send a GoAway before closing the connection.
+func (s *Session) Close() error {
+ s.shutdownLock.Lock()
+ defer s.shutdownLock.Unlock()
+
+ if s.shutdown {
+ return nil
+ }
+ s.shutdown = true
+
+ s.shutdownErrLock.Lock()
+ if s.shutdownErr == nil {
+ s.shutdownErr = ErrSessionShutdown
+ }
+ s.shutdownErrLock.Unlock()
+
+ close(s.shutdownCh)
+
+ s.conn.Close()
+ <-s.recvDoneCh
+
+ s.streamLock.Lock()
+ defer s.streamLock.Unlock()
+ for _, stream := range s.streams {
+ stream.forceClose()
+ }
+ <-s.sendDoneCh
+ return nil
+}
+
+// exitErr is used to handle an error that is causing the
+// session to terminate.
+func (s *Session) exitErr(err error) {
+ s.shutdownErrLock.Lock()
+ if s.shutdownErr == nil {
+ s.shutdownErr = err
+ }
+ s.shutdownErrLock.Unlock()
+ s.Close()
+}
+
+// GoAway can be used to prevent accepting further
+// connections. It does not close the underlying conn.
+func (s *Session) GoAway() error {
+ return s.waitForSend(s.goAway(goAwayNormal), nil)
+}
+
+// goAway is used to send a goAway message
+func (s *Session) goAway(reason uint32) header {
+ atomic.SwapInt32(&s.localGoAway, 1)
+ hdr := header(make([]byte, headerSize))
+ hdr.encode(typeGoAway, 0, 0, reason)
+ return hdr
+}
+
+// Ping is used to measure the RTT response time
+func (s *Session) Ping() (time.Duration, error) {
+ // Get a channel for the ping
+ ch := make(chan struct{})
+
+ // Get a new ping id, mark as pending
+ s.pingLock.Lock()
+ id := s.pingID
+ s.pingID++
+ s.pings[id] = ch
+ s.pingLock.Unlock()
+
+ // Send the ping request
+ hdr := header(make([]byte, headerSize))
+ hdr.encode(typePing, flagSYN, 0, id)
+ if err := s.waitForSend(hdr, nil); err != nil {
+ return 0, err
+ }
+
+ // Wait for a response
+ start := time.Now()
+ select {
+ case <-ch:
+ case <-time.After(s.config.ConnectionWriteTimeout):
+ s.pingLock.Lock()
+ delete(s.pings, id) // Ignore it if a response comes later.
+ s.pingLock.Unlock()
+ return 0, ErrTimeout
+ case <-s.shutdownCh:
+ return 0, ErrSessionShutdown
+ }
+
+ // Compute the RTT
+ return time.Now().Sub(start), nil
+}
+
+// keepalive is a long running goroutine that periodically does
+// a ping to keep the connection alive.
+func (s *Session) keepalive() {
+ for {
+ select {
+ case <-time.After(s.config.KeepAliveInterval):
+ _, err := s.Ping()
+ if err != nil {
+ if err != ErrSessionShutdown {
+ s.logger.Printf("[ERR] yamux: keepalive failed: %v", err)
+ s.exitErr(ErrKeepAliveTimeout)
+ }
+ return
+ }
+ case <-s.shutdownCh:
+ return
+ }
+ }
+}
+
+// waitForSend waits to send a header, checking for a potential shutdown
+func (s *Session) waitForSend(hdr header, body []byte) error {
+ errCh := make(chan error, 1)
+ return s.waitForSendErr(hdr, body, errCh)
+}
+
+// waitForSendErr waits to send a header with optional data, checking for a
+// potential shutdown. Since there's the expectation that sends can happen
+// in a timely manner, we enforce the connection write timeout here.
+func (s *Session) waitForSendErr(hdr header, body []byte, errCh chan error) error {
+ t := timerPool.Get()
+ timer := t.(*time.Timer)
+ timer.Reset(s.config.ConnectionWriteTimeout)
+ defer func() {
+ timer.Stop()
+ select {
+ case <-timer.C:
+ default:
+ }
+ timerPool.Put(t)
+ }()
+
+ ready := &sendReady{Hdr: hdr, Body: body, Err: errCh}
+ select {
+ case s.sendCh <- ready:
+ case <-s.shutdownCh:
+ return ErrSessionShutdown
+ case <-timer.C:
+ return ErrConnectionWriteTimeout
+ }
+
+ bodyCopy := func() {
+ if body == nil {
+ return // A nil body is ignored.
+ }
+
+ // In the event of session shutdown or connection write timeout,
+ // we need to prevent `send` from reading the body buffer after
+ // returning from this function since the caller may re-use the
+ // underlying array.
+ ready.mu.Lock()
+ defer ready.mu.Unlock()
+
+ if ready.Body == nil {
+ return // Body was already copied in `send`.
+ }
+ newBody := make([]byte, len(body))
+ copy(newBody, body)
+ ready.Body = newBody
+ }
+
+ select {
+ case err := <-errCh:
+ return err
+ case <-s.shutdownCh:
+ bodyCopy()
+ return ErrSessionShutdown
+ case <-timer.C:
+ bodyCopy()
+ return ErrConnectionWriteTimeout
+ }
+}
+
+// sendNoWait does a send without waiting. Since there's the expectation that
+// the send happens right here, we enforce the connection write timeout if we
+// can't queue the header to be sent.
+func (s *Session) sendNoWait(hdr header) error {
+ t := timerPool.Get()
+ timer := t.(*time.Timer)
+ timer.Reset(s.config.ConnectionWriteTimeout)
+ defer func() {
+ timer.Stop()
+ select {
+ case <-timer.C:
+ default:
+ }
+ timerPool.Put(t)
+ }()
+
+ select {
+ case s.sendCh <- &sendReady{Hdr: hdr}:
+ return nil
+ case <-s.shutdownCh:
+ return ErrSessionShutdown
+ case <-timer.C:
+ return ErrConnectionWriteTimeout
+ }
+}
+
+// send is a long running goroutine that sends data
+func (s *Session) send() {
+ if err := s.sendLoop(); err != nil {
+ s.exitErr(err)
+ }
+}
+
+func (s *Session) sendLoop() error {
+ defer close(s.sendDoneCh)
+ var bodyBuf bytes.Buffer
+ for {
+ bodyBuf.Reset()
+
+ select {
+ case ready := <-s.sendCh:
+ // Send a header if ready
+ if ready.Hdr != nil {
+ _, err := s.conn.Write(ready.Hdr)
+ if err != nil {
+ s.logger.Printf("[ERR] yamux: Failed to write header: %v", err)
+ asyncSendErr(ready.Err, err)
+ return err
+ }
+ }
+
+ ready.mu.Lock()
+ if ready.Body != nil {
+ // Copy the body into the buffer to avoid
+ // holding a mutex lock during the write.
+ _, err := bodyBuf.Write(ready.Body)
+ if err != nil {
+ ready.Body = nil
+ ready.mu.Unlock()
+ s.logger.Printf("[ERR] yamux: Failed to copy body into buffer: %v", err)
+ asyncSendErr(ready.Err, err)
+ return err
+ }
+ ready.Body = nil
+ }
+ ready.mu.Unlock()
+
+ if bodyBuf.Len() > 0 {
+ // Send data from a body if given
+ _, err := s.conn.Write(bodyBuf.Bytes())
+ if err != nil {
+ s.logger.Printf("[ERR] yamux: Failed to write body: %v", err)
+ asyncSendErr(ready.Err, err)
+ return err
+ }
+ }
+
+ // No error, successful send
+ asyncSendErr(ready.Err, nil)
+ case <-s.shutdownCh:
+ return nil
+ }
+ }
+}
+
+// recv is a long running goroutine that accepts new data
+func (s *Session) recv() {
+ if err := s.recvLoop(); err != nil {
+ s.exitErr(err)
+ }
+}
+
+// Ensure that the index of the handler (typeData/typeWindowUpdate/etc) matches the message type
+var (
+ handlers = []func(*Session, header) error{
+ typeData: (*Session).handleStreamMessage,
+ typeWindowUpdate: (*Session).handleStreamMessage,
+ typePing: (*Session).handlePing,
+ typeGoAway: (*Session).handleGoAway,
+ }
+)
+
+// recvLoop continues to receive data until a fatal error is encountered
+func (s *Session) recvLoop() error {
+ defer close(s.recvDoneCh)
+ hdr := header(make([]byte, headerSize))
+ for {
+ // Read the header
+ if _, err := io.ReadFull(s.bufRead, hdr); err != nil {
+ if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") {
+ s.logger.Printf("[ERR] yamux: Failed to read header: %v", err)
+ }
+ return err
+ }
+
+ // Verify the version
+ if hdr.Version() != protoVersion {
+ s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version())
+ return ErrInvalidVersion
+ }
+
+ mt := hdr.MsgType()
+ if mt < typeData || mt > typeGoAway {
+ return ErrInvalidMsgType
+ }
+
+ if err := handlers[mt](s, hdr); err != nil {
+ return err
+ }
+ }
+}
+
+// handleStreamMessage handles either a data or window update frame
+func (s *Session) handleStreamMessage(hdr header) error {
+ // Check for a new stream creation
+ id := hdr.StreamID()
+ flags := hdr.Flags()
+ if flags&flagSYN == flagSYN {
+ if err := s.incomingStream(id); err != nil {
+ return err
+ }
+ }
+
+ // Get the stream
+ s.streamLock.Lock()
+ stream := s.streams[id]
+ s.streamLock.Unlock()
+
+ // If we do not have a stream, likely we sent a RST
+ if stream == nil {
+ // Drain any data on the wire
+ if hdr.MsgType() == typeData && hdr.Length() > 0 {
+ s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id)
+ if _, err := io.CopyN(ioutil.Discard, s.bufRead, int64(hdr.Length())); err != nil {
+ s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err)
+ return nil
+ }
+ } else {
+ s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr)
+ }
+ return nil
+ }
+
+ // Check if this is a window update
+ if hdr.MsgType() == typeWindowUpdate {
+ if err := stream.incrSendWindow(hdr, flags); err != nil {
+ if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+ s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+ }
+ return err
+ }
+ return nil
+ }
+
+ // Read the new data
+ if err := stream.readData(hdr, flags, s.bufRead); err != nil {
+ if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+ s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+ }
+ return err
+ }
+ return nil
+}
+
+// handlePing is invoked for a typePing frame
+func (s *Session) handlePing(hdr header) error {
+ flags := hdr.Flags()
+ pingID := hdr.Length()
+
+ // Check if this is a query, respond back in a separate context so we
+ // don't interfere with the receiving thread blocking for the write.
+ if flags&flagSYN == flagSYN {
+ go func() {
+ hdr := header(make([]byte, headerSize))
+ hdr.encode(typePing, flagACK, 0, pingID)
+ if err := s.sendNoWait(hdr); err != nil {
+ s.logger.Printf("[WARN] yamux: failed to send ping reply: %v", err)
+ }
+ }()
+ return nil
+ }
+
+ // Handle a response
+ s.pingLock.Lock()
+ ch := s.pings[pingID]
+ if ch != nil {
+ delete(s.pings, pingID)
+ close(ch)
+ }
+ s.pingLock.Unlock()
+ return nil
+}
+
+// handleGoAway is invoked for a typeGoAway frame
+func (s *Session) handleGoAway(hdr header) error {
+ code := hdr.Length()
+ switch code {
+ case goAwayNormal:
+ atomic.SwapInt32(&s.remoteGoAway, 1)
+ case goAwayProtoErr:
+ s.logger.Printf("[ERR] yamux: received protocol error go away")
+ return fmt.Errorf("yamux protocol error")
+ case goAwayInternalErr:
+ s.logger.Printf("[ERR] yamux: received internal error go away")
+ return fmt.Errorf("remote yamux internal error")
+ default:
+ s.logger.Printf("[ERR] yamux: received unexpected go away")
+ return fmt.Errorf("unexpected go away received")
+ }
+ return nil
+}
+
+// incomingStream is used to create a new incoming stream
+func (s *Session) incomingStream(id uint32) error {
+ // Reject immediately if we are doing a go away
+ if atomic.LoadInt32(&s.localGoAway) == 1 {
+ hdr := header(make([]byte, headerSize))
+ hdr.encode(typeWindowUpdate, flagRST, id, 0)
+ return s.sendNoWait(hdr)
+ }
+
+ // Allocate a new stream
+ stream := newStream(s, id, streamSYNReceived)
+
+ s.streamLock.Lock()
+ defer s.streamLock.Unlock()
+
+ // Check if stream already exists
+ if _, ok := s.streams[id]; ok {
+ s.logger.Printf("[ERR] yamux: duplicate stream declared")
+ if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+ s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+ }
+ return ErrDuplicateStream
+ }
+
+ // Register the stream
+ s.streams[id] = stream
+
+ // Check if we've exceeded the backlog
+ select {
+ case s.acceptCh <- stream:
+ return nil
+ default:
+ // Backlog exceeded! RST the stream
+ s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset")
+ delete(s.streams, id)
+ hdr := header(make([]byte, headerSize))
+ hdr.encode(typeWindowUpdate, flagRST, id, 0)
+ return s.sendNoWait(hdr)
+ }
+}
+
+// closeStream is used to close a stream once both sides have
+// issued a close. If there was an in-flight SYN and the stream
+// was not yet established, then this will give the credit back.
+func (s *Session) closeStream(id uint32) {
+ s.streamLock.Lock()
+ if _, ok := s.inflight[id]; ok {
+ select {
+ case <-s.synCh:
+ default:
+ s.logger.Printf("[ERR] yamux: SYN tracking out of sync")
+ }
+ }
+ delete(s.streams, id)
+ s.streamLock.Unlock()
+}
+
+// establishStream is used to mark a stream that was in the
+// SYN Sent state as established.
+func (s *Session) establishStream(id uint32) {
+ s.streamLock.Lock()
+ if _, ok := s.inflight[id]; ok {
+ delete(s.inflight, id)
+ } else {
+ s.logger.Printf("[ERR] yamux: established stream without inflight SYN (no tracking entry)")
+ }
+ select {
+ case <-s.synCh:
+ default:
+ s.logger.Printf("[ERR] yamux: established stream without inflight SYN (didn't have semaphore)")
+ }
+ s.streamLock.Unlock()
+}
diff --git a/vendor/github.com/hashicorp/yamux/spec.md b/vendor/github.com/hashicorp/yamux/spec.md
new file mode 100644
index 00000000..183d797b
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/spec.md
@@ -0,0 +1,140 @@
+# Specification
+
+We use this document to detail the internal specification of Yamux.
+It serves both as a guide for implementing Yamux and as a reference
+for building alternative interoperable libraries.
+
+# Framing
+
+Yamux uses a streaming connection underneath, but imposes a message
+framing so that it can be shared between many logical streams. Each
+frame contains a header like:
+
+* Version (8 bits)
+* Type (8 bits)
+* Flags (16 bits)
+* StreamID (32 bits)
+* Length (32 bits)
+
+This means that each header has a 12 byte overhead.
+All fields are encoded in network order (big endian).
+Each field is described below:
+
+## Version Field
+
+The version field is used for future backward compatibility. At the
+current time, the field is always set to 0, to indicate the initial
+version.
+
+## Type Field
+
+The type field is used to switch the frame message type. The following
+message types are supported:
+
+* 0x0 Data - Used to transmit data. May transmit zero length payloads
+ depending on the flags.
+
+* 0x1 Window Update - Used to update the sender's receive window size.
+ This is used to implement per-session flow control.
+
+* 0x2 Ping - Used to measure RTT. It can also be used as a heartbeat
+  and to do keep-alives over TCP.
+
+* 0x3 Go Away - Used to close a session.
+
+## Flag Field
+
+The flags field is used to provide additional information related
+to the message type. The following flags are supported:
+
+* 0x1 SYN - Signals the start of a new stream. May be sent with a data or
+ window update message. Also sent with a ping to indicate outbound.
+
+* 0x2 ACK - Acknowledges the start of a new stream. May be sent with a data
+ or window update message. Also sent with a ping to indicate response.
+
+* 0x4 FIN - Performs a half-close of a stream. May be sent with a data
+ message or window update.
+
+* 0x8 RST - Reset a stream immediately. May be sent with a data or
+ window update message.
+
+## StreamID Field
+
+The StreamID field is used to identify the logical stream the frame
+is addressing. The client side should use odd IDs, and the server even.
+This prevents any collisions. Additionally, the 0 ID is reserved to represent
+the session.
+
+Both Ping and Go Away messages should always use the 0 StreamID.
+
+## Length Field
+
+The meaning of the length field depends on the message type:
+
+* Data - provides the number of payload bytes following the header
+* Window update - provides a delta update to the window size
+* Ping - contains an opaque value, echoed back
+* Go Away - contains an error code
+
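+To make the framing concrete, here is a small self-contained sketch (not part
+of the library; the function and variable names are illustrative) that encodes
+and decodes the 12-byte header exactly as laid out above, in network byte
+order:
+
+```go
+package main
+
+import (
+	"encoding/binary"
+	"fmt"
+)
+
+// encodeHeader packs a frame header: version, type, flags, StreamID,
+// length, all big endian.
+func encodeHeader(msgType uint8, flags uint16, streamID, length uint32) [12]byte {
+	var h [12]byte
+	h[0] = 0 // version is always 0 at the current time
+	h[1] = msgType
+	binary.BigEndian.PutUint16(h[2:4], flags)
+	binary.BigEndian.PutUint32(h[4:8], streamID)
+	binary.BigEndian.PutUint32(h[8:12], length)
+	return h
+}
+
+func main() {
+	// A Data frame (0x0) with the SYN flag (0x1) on stream 1, 4-byte payload.
+	h := encodeHeader(0x0, 0x1, 1, 4)
+	fmt.Printf("type=%d flags=%d stream=%d len=%d\n",
+		h[1],
+		binary.BigEndian.Uint16(h[2:4]),
+		binary.BigEndian.Uint32(h[4:8]),
+		binary.BigEndian.Uint32(h[8:12]))
+}
+```
+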
+# Message Flow
+
+There is no explicit connection setup, as Yamux relies on an underlying
+transport to be provided. However, there is a distinction between client
+and server side of the connection.
+
+## Opening a stream
+
+To open a stream, an initial data or window update frame is sent
+with a new StreamID. The SYN flag should be set to signal a new stream.
+
+The receiver must then reply with either a data or window update frame
+with the StreamID along with the ACK flag to accept the stream or with
+the RST flag to reject the stream.
+
+Because we are relying on the reliable stream underneath, a connection
+can begin sending data once the SYN flag is sent. The corresponding
+ACK does not need to be received. This is particularly well suited
+for an RPC system where a client wants to open a stream and immediately
+fire a request without waiting for the RTT of the ACK.
+
+This does introduce the possibility of a connection being rejected
+after data has been sent already. This is a slight semantic difference
+from TCP, where the connection cannot be refused after it is opened.
+Clients should be prepared to handle this by checking for an error
+that indicates a RST was received.
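+
+Using this library, that early-data behaviour looks roughly like the sketch
+below (a hedged example; `session` is assumed to be an established
+client-side `*yamux.Session`):
+
+```go
+package example
+
+import "github.com/hashicorp/yamux"
+
+// openAndSend illustrates that data may flow before the peer's ACK arrives.
+func openAndSend(session *yamux.Session) error {
+	// OpenStream sends the SYN (via an initial window update) and returns
+	// without waiting for the peer's ACK.
+	stream, err := session.OpenStream()
+	if err != nil {
+		return err
+	}
+	// Data may be written immediately, before the ACK is received.
+	if _, err := stream.Write([]byte("request")); err != nil {
+		// A peer RST surfaces as yamux.ErrConnectionReset on use.
+		return err
+	}
+	return stream.Close()
+}
+```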
+
+## Closing a stream
+
+To close a stream, either side sends a data or window update frame
+along with the FIN flag. This does a half-close indicating the sender
+will send no further data.
+
+Once both sides have closed the connection, the stream is closed.
+
+Alternatively, if an error occurs, the RST flag can be used to
+hard close a stream immediately.
+
+## Flow Control
+
+Yamux initially starts each stream with a 256KB window size.
+There is no window size for the session.
+
+To prevent the streams from stalling, window update frames should be
+sent regularly. Yamux can be configured to provide a larger limit for
+windows sizes. Both sides assume the initial 256KB window, but can
+immediately send a window update as part of the SYN/ACK indicating a
+larger window.
+
+Both sides should count only the bytes sent in Data frames, as only
+those count against the window size.
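+
+As a simplified sketch of the receiver's side of this (mirroring the delta
+calculation in this repository's `stream.go`), a window update is only worth
+sending once at least half of the configured maximum window can be granted
+back:
+
+```go
+package example
+
+// windowDelta computes how much receive window can be granted back.
+// maxWindow is the configured MaxStreamWindowSize, buffered is the number
+// of unread bytes held for the stream, and recvWindow is the credit
+// already granted to the sender.
+func windowDelta(maxWindow, buffered, recvWindow uint32) (delta uint32, send bool) {
+	delta = (maxWindow - buffered) - recvWindow
+	if delta < maxWindow/2 {
+		return 0, false // not worth a window update yet
+	}
+	return delta, true
+}
+```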
+
+## Session termination
+
+When a session is being terminated, the Go Away message should
+be sent. The Length should be set to one of the following to
+provide an error code:
+
+* 0x0 Normal termination
+* 0x1 Protocol error
+* 0x2 Internal error
diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go
new file mode 100644
index 00000000..23d08fcc
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/stream.go
@@ -0,0 +1,544 @@
+package yamux
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+type streamState int
+
+const (
+ streamInit streamState = iota
+ streamSYNSent
+ streamSYNReceived
+ streamEstablished
+ streamLocalClose
+ streamRemoteClose
+ streamClosed
+ streamReset
+)
+
+// Stream is used to represent a logical stream
+// within a session.
+type Stream struct {
+ recvWindow uint32
+ sendWindow uint32
+
+ id uint32
+ session *Session
+
+ state streamState
+ stateLock sync.Mutex
+
+ recvBuf *bytes.Buffer
+ recvLock sync.Mutex
+
+ controlHdr header
+ controlErr chan error
+ controlHdrLock sync.Mutex
+
+ sendHdr header
+ sendErr chan error
+ sendLock sync.Mutex
+
+ recvNotifyCh chan struct{}
+ sendNotifyCh chan struct{}
+
+ readDeadline atomic.Value // time.Time
+ writeDeadline atomic.Value // time.Time
+
+ // establishCh is notified if the stream is established or being closed.
+ establishCh chan struct{}
+
+ // closeTimer is set with stateLock held to honor the StreamCloseTimeout
+ // setting on Session.
+ closeTimer *time.Timer
+}
+
+// newStream is used to construct a new stream within
+// a given session for an ID
+func newStream(session *Session, id uint32, state streamState) *Stream {
+ s := &Stream{
+ id: id,
+ session: session,
+ state: state,
+ controlHdr: header(make([]byte, headerSize)),
+ controlErr: make(chan error, 1),
+ sendHdr: header(make([]byte, headerSize)),
+ sendErr: make(chan error, 1),
+ recvWindow: initialStreamWindow,
+ sendWindow: initialStreamWindow,
+ recvNotifyCh: make(chan struct{}, 1),
+ sendNotifyCh: make(chan struct{}, 1),
+ establishCh: make(chan struct{}, 1),
+ }
+ s.readDeadline.Store(time.Time{})
+ s.writeDeadline.Store(time.Time{})
+ return s
+}
+
+// Session returns the associated stream session
+func (s *Stream) Session() *Session {
+ return s.session
+}
+
+// StreamID returns the ID of this stream
+func (s *Stream) StreamID() uint32 {
+ return s.id
+}
+
+// Read is used to read from the stream
+func (s *Stream) Read(b []byte) (n int, err error) {
+ defer asyncNotify(s.recvNotifyCh)
+START:
+ s.stateLock.Lock()
+ switch s.state {
+ case streamLocalClose:
+ fallthrough
+ case streamRemoteClose:
+ fallthrough
+ case streamClosed:
+ s.recvLock.Lock()
+ if s.recvBuf == nil || s.recvBuf.Len() == 0 {
+ s.recvLock.Unlock()
+ s.stateLock.Unlock()
+ return 0, io.EOF
+ }
+ s.recvLock.Unlock()
+ case streamReset:
+ s.stateLock.Unlock()
+ return 0, ErrConnectionReset
+ }
+ s.stateLock.Unlock()
+
+ // If there is no data available, block
+ s.recvLock.Lock()
+ if s.recvBuf == nil || s.recvBuf.Len() == 0 {
+ s.recvLock.Unlock()
+ goto WAIT
+ }
+
+ // Read any bytes
+ n, _ = s.recvBuf.Read(b)
+ s.recvLock.Unlock()
+
+ // Send a window update potentially
+ err = s.sendWindowUpdate()
+ if err == ErrSessionShutdown {
+ err = nil
+ }
+ return n, err
+
+WAIT:
+ var timeout <-chan time.Time
+ var timer *time.Timer
+ readDeadline := s.readDeadline.Load().(time.Time)
+ if !readDeadline.IsZero() {
+ delay := readDeadline.Sub(time.Now())
+ timer = time.NewTimer(delay)
+ timeout = timer.C
+ }
+ select {
+ case <-s.recvNotifyCh:
+ if timer != nil {
+ timer.Stop()
+ }
+ goto START
+ case <-timeout:
+ return 0, ErrTimeout
+ }
+}
+
+// Write is used to write to the stream
+func (s *Stream) Write(b []byte) (n int, err error) {
+ s.sendLock.Lock()
+ defer s.sendLock.Unlock()
+ total := 0
+ for total < len(b) {
+ n, err := s.write(b[total:])
+ total += n
+ if err != nil {
+ return total, err
+ }
+ }
+ return total, nil
+}
+
+// write is used to write to the stream, may return on
+// a short write.
+func (s *Stream) write(b []byte) (n int, err error) {
+ var flags uint16
+ var max uint32
+ var body []byte
+START:
+ s.stateLock.Lock()
+ switch s.state {
+ case streamLocalClose:
+ fallthrough
+ case streamClosed:
+ s.stateLock.Unlock()
+ return 0, ErrStreamClosed
+ case streamReset:
+ s.stateLock.Unlock()
+ return 0, ErrConnectionReset
+ }
+ s.stateLock.Unlock()
+
+ // If there is no data available, block
+ window := atomic.LoadUint32(&s.sendWindow)
+ if window == 0 {
+ goto WAIT
+ }
+
+ // Determine the flags if any
+ flags = s.sendFlags()
+
+ // Send up to our send window
+ max = min(window, uint32(len(b)))
+ body = b[:max]
+
+ // Send the header
+ s.sendHdr.encode(typeData, flags, s.id, max)
+ if err = s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil {
+ if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) {
+ // Message left in ready queue, header re-use is unsafe.
+ s.sendHdr = header(make([]byte, headerSize))
+ }
+ return 0, err
+ }
+
+ // Reduce our send window
+ atomic.AddUint32(&s.sendWindow, ^uint32(max-1))
+
+ // Unlock
+ return int(max), err
+
+WAIT:
+ var timeout <-chan time.Time
+ writeDeadline := s.writeDeadline.Load().(time.Time)
+ if !writeDeadline.IsZero() {
+ delay := writeDeadline.Sub(time.Now())
+ timeout = time.After(delay)
+ }
+ select {
+ case <-s.sendNotifyCh:
+ goto START
+ case <-timeout:
+ return 0, ErrTimeout
+ }
+ return 0, nil
+}
+
+// sendFlags determines any flags that are appropriate
+// based on the current stream state
+func (s *Stream) sendFlags() uint16 {
+ s.stateLock.Lock()
+ defer s.stateLock.Unlock()
+ var flags uint16
+ switch s.state {
+ case streamInit:
+ flags |= flagSYN
+ s.state = streamSYNSent
+ case streamSYNReceived:
+ flags |= flagACK
+ s.state = streamEstablished
+ }
+ return flags
+}
+
+// sendWindowUpdate potentially sends a window update enabling
+// further writes to take place. Must be invoked with the lock.
+func (s *Stream) sendWindowUpdate() error {
+ s.controlHdrLock.Lock()
+ defer s.controlHdrLock.Unlock()
+
+ // Determine the delta update
+ max := s.session.config.MaxStreamWindowSize
+ var bufLen uint32
+ s.recvLock.Lock()
+ if s.recvBuf != nil {
+ bufLen = uint32(s.recvBuf.Len())
+ }
+ delta := (max - bufLen) - s.recvWindow
+
+ // Determine the flags if any
+ flags := s.sendFlags()
+
+ // Check if we can omit the update
+ if delta < (max/2) && flags == 0 {
+ s.recvLock.Unlock()
+ return nil
+ }
+
+ // Update our window
+ s.recvWindow += delta
+ s.recvLock.Unlock()
+
+ // Send the header
+ s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta)
+ if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil {
+ if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) {
+ // Message left in ready queue, header re-use is unsafe.
+ s.controlHdr = header(make([]byte, headerSize))
+ }
+ return err
+ }
+ return nil
+}
+
+// sendClose is used to send a FIN
+func (s *Stream) sendClose() error {
+ s.controlHdrLock.Lock()
+ defer s.controlHdrLock.Unlock()
+
+ flags := s.sendFlags()
+ flags |= flagFIN
+ s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0)
+ if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil {
+ if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) {
+ // Message left in ready queue, header re-use is unsafe.
+ s.controlHdr = header(make([]byte, headerSize))
+ }
+ return err
+ }
+ return nil
+}
+
+// Close is used to close the stream
+func (s *Stream) Close() error {
+ closeStream := false
+ s.stateLock.Lock()
+ switch s.state {
+ // Opened means we need to signal a close
+ case streamSYNSent:
+ fallthrough
+ case streamSYNReceived:
+ fallthrough
+ case streamEstablished:
+ s.state = streamLocalClose
+ goto SEND_CLOSE
+
+ case streamLocalClose:
+ case streamRemoteClose:
+ s.state = streamClosed
+ closeStream = true
+ goto SEND_CLOSE
+
+ case streamClosed:
+ case streamReset:
+ default:
+ panic("unhandled state")
+ }
+ s.stateLock.Unlock()
+ return nil
+SEND_CLOSE:
+ // This shouldn't happen (the more realistic scenario to cancel the
+ // timer is via processFlags) but just in case this ever happens, we
+ // cancel the timer to prevent dangling timers.
+ if s.closeTimer != nil {
+ s.closeTimer.Stop()
+ s.closeTimer = nil
+ }
+
+ // If we have a StreamCloseTimeout set we start the timeout timer.
+ // We do this only if we're not already closing the stream since that
+ // means this was a graceful close.
+ //
+ // This prevents memory leaks if one side (this side) closes and the
+ // remote side poorly behaves and never responds with a FIN to complete
+ // the close. After the specified timeout, we clean our resources up no
+ // matter what.
+ if !closeStream && s.session.config.StreamCloseTimeout > 0 {
+ s.closeTimer = time.AfterFunc(
+ s.session.config.StreamCloseTimeout, s.closeTimeout)
+ }
+
+ s.stateLock.Unlock()
+ s.sendClose()
+ s.notifyWaiting()
+ if closeStream {
+ s.session.closeStream(s.id)
+ }
+ return nil
+}
+
+// closeTimeout is called after StreamCloseTimeout during a close to
+// close this stream.
+func (s *Stream) closeTimeout() {
+ // Close our side forcibly
+ s.forceClose()
+
+ // Free the stream from the session map
+ s.session.closeStream(s.id)
+
+ // Send a RST so the remote side closes too.
+ s.sendLock.Lock()
+ defer s.sendLock.Unlock()
+ hdr := header(make([]byte, headerSize))
+ hdr.encode(typeWindowUpdate, flagRST, s.id, 0)
+ s.session.sendNoWait(hdr)
+}
+
+// forceClose is used for when the session is exiting
+func (s *Stream) forceClose() {
+ s.stateLock.Lock()
+ s.state = streamClosed
+ s.stateLock.Unlock()
+ s.notifyWaiting()
+}
+
+// processFlags is used to update the state of the stream
+// based on set flags, if any. Lock must be held
+func (s *Stream) processFlags(flags uint16) error {
+ s.stateLock.Lock()
+ defer s.stateLock.Unlock()
+
+ // Close the stream without holding the state lock
+ closeStream := false
+ defer func() {
+ if closeStream {
+ if s.closeTimer != nil {
+ // Stop our close timeout timer since we gracefully closed
+ s.closeTimer.Stop()
+ }
+
+ s.session.closeStream(s.id)
+ }
+ }()
+
+ if flags&flagACK == flagACK {
+ if s.state == streamSYNSent {
+ s.state = streamEstablished
+ }
+ asyncNotify(s.establishCh)
+ s.session.establishStream(s.id)
+ }
+ if flags&flagFIN == flagFIN {
+ switch s.state {
+ case streamSYNSent:
+ fallthrough
+ case streamSYNReceived:
+ fallthrough
+ case streamEstablished:
+ s.state = streamRemoteClose
+ s.notifyWaiting()
+ case streamLocalClose:
+ s.state = streamClosed
+ closeStream = true
+ s.notifyWaiting()
+ default:
+ s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state)
+ return ErrUnexpectedFlag
+ }
+ }
+ if flags&flagRST == flagRST {
+ s.state = streamReset
+ closeStream = true
+ s.notifyWaiting()
+ }
+ return nil
+}
+
+// notifyWaiting notifies all the waiting channels
+func (s *Stream) notifyWaiting() {
+ asyncNotify(s.recvNotifyCh)
+ asyncNotify(s.sendNotifyCh)
+ asyncNotify(s.establishCh)
+}
+
+// incrSendWindow updates the size of our send window
+func (s *Stream) incrSendWindow(hdr header, flags uint16) error {
+ if err := s.processFlags(flags); err != nil {
+ return err
+ }
+
+ // Increase window, unblock a sender
+ atomic.AddUint32(&s.sendWindow, hdr.Length())
+ asyncNotify(s.sendNotifyCh)
+ return nil
+}
+
+// readData is used to handle a data frame
+func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error {
+ if err := s.processFlags(flags); err != nil {
+ return err
+ }
+
+ // Check that our recv window is not exceeded
+ length := hdr.Length()
+ if length == 0 {
+ return nil
+ }
+
+ // Wrap in a limited reader
+ conn = &io.LimitedReader{R: conn, N: int64(length)}
+
+ // Copy into buffer
+ s.recvLock.Lock()
+
+ if length > s.recvWindow {
+ s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, s.recvWindow, length)
+ s.recvLock.Unlock()
+ return ErrRecvWindowExceeded
+ }
+
+ if s.recvBuf == nil {
+ // Allocate the receive buffer just-in-time to fit the full data frame.
+ // This way we can read in the whole packet without further allocations.
+ s.recvBuf = bytes.NewBuffer(make([]byte, 0, length))
+ }
+ copiedLength, err := io.Copy(s.recvBuf, conn)
+ if err != nil {
+ s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err)
+ s.recvLock.Unlock()
+ return err
+ }
+
+ // Decrement the receive window
+ s.recvWindow -= uint32(copiedLength)
+ s.recvLock.Unlock()
+
+ // Unblock any readers
+ asyncNotify(s.recvNotifyCh)
+ return nil
+}
+
+// SetDeadline sets the read and write deadlines
+func (s *Stream) SetDeadline(t time.Time) error {
+ if err := s.SetReadDeadline(t); err != nil {
+ return err
+ }
+ if err := s.SetWriteDeadline(t); err != nil {
+ return err
+ }
+ return nil
+}
+
+// SetReadDeadline sets the deadline for blocked and future Read calls.
+func (s *Stream) SetReadDeadline(t time.Time) error {
+ s.readDeadline.Store(t)
+ asyncNotify(s.recvNotifyCh)
+ return nil
+}
+
+// SetWriteDeadline sets the deadline for blocked and future Write calls
+func (s *Stream) SetWriteDeadline(t time.Time) error {
+ s.writeDeadline.Store(t)
+ asyncNotify(s.sendNotifyCh)
+ return nil
+}
+
+// Shrink is used to compact the amount of buffer space utilized.
+// This is useful when using Yamux in a connection pool to reduce
+// the idle memory utilization.
+func (s *Stream) Shrink() {
+ s.recvLock.Lock()
+ if s.recvBuf != nil && s.recvBuf.Len() == 0 {
+ s.recvBuf = nil
+ }
+ s.recvLock.Unlock()
+}
diff --git a/vendor/github.com/hashicorp/yamux/util.go b/vendor/github.com/hashicorp/yamux/util.go
new file mode 100644
index 00000000..8a73e924
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/util.go
@@ -0,0 +1,43 @@
+package yamux
+
+import (
+ "sync"
+ "time"
+)
+
+var (
+ timerPool = &sync.Pool{
+ New: func() interface{} {
+ timer := time.NewTimer(time.Hour * 1e6)
+ timer.Stop()
+ return timer
+ },
+ }
+)
+
+// asyncSendErr is used to try an async send of an error
+func asyncSendErr(ch chan error, err error) {
+ if ch == nil {
+ return
+ }
+ select {
+ case ch <- err:
+ default:
+ }
+}
+
+// asyncNotify is used to signal a waiting goroutine
+func asyncNotify(ch chan struct{}) {
+ select {
+ case ch <- struct{}{}:
+ default:
+ }
+}
+
+// min computes the minimum of two values
+func min(a, b uint32) uint32 {
+ if a < b {
+ return a
+ }
+ return b
+}
diff --git a/vendor/github.com/json-iterator/go/.codecov.yml b/vendor/github.com/json-iterator/go/.codecov.yml
deleted file mode 100644
index 955dc0be..00000000
--- a/vendor/github.com/json-iterator/go/.codecov.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-ignore:
- - "output_tests/.*"
-
diff --git a/vendor/github.com/json-iterator/go/.gitignore b/vendor/github.com/json-iterator/go/.gitignore
deleted file mode 100644
index 15556530..00000000
--- a/vendor/github.com/json-iterator/go/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-/vendor
-/bug_test.go
-/coverage.txt
-/.idea
diff --git a/vendor/github.com/json-iterator/go/.travis.yml b/vendor/github.com/json-iterator/go/.travis.yml
deleted file mode 100644
index 449e67cd..00000000
--- a/vendor/github.com/json-iterator/go/.travis.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-language: go
-
-go:
- - 1.8.x
- - 1.x
-
-before_install:
- - go get -t -v ./...
-
-script:
- - ./test.sh
-
-after_success:
- - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock
deleted file mode 100644
index c8a9fbb3..00000000
--- a/vendor/github.com/json-iterator/go/Gopkg.lock
+++ /dev/null
@@ -1,21 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
- name = "github.com/modern-go/concurrent"
- packages = ["."]
- revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a"
- version = "1.0.0"
-
-[[projects]]
- name = "github.com/modern-go/reflect2"
- packages = ["."]
- revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
- version = "1.0.1"
-
-[solve-meta]
- analyzer-name = "dep"
- analyzer-version = 1
- inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8"
- solver-name = "gps-cdcl"
- solver-version = 1
diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml
deleted file mode 100644
index 313a0f88..00000000
--- a/vendor/github.com/json-iterator/go/Gopkg.toml
+++ /dev/null
@@ -1,26 +0,0 @@
-# Gopkg.toml example
-#
-# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
-# for detailed Gopkg.toml documentation.
-#
-# required = ["github.com/user/thing/cmd/thing"]
-# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
-#
-# [[constraint]]
-# name = "github.com/user/project"
-# version = "1.0.0"
-#
-# [[constraint]]
-# name = "github.com/user/project2"
-# branch = "dev"
-# source = "github.com/myfork/project2"
-#
-# [[override]]
-# name = "github.com/x/y"
-# version = "2.4.0"
-
-ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"]
-
-[[constraint]]
- name = "github.com/modern-go/reflect2"
- version = "1.0.1"
diff --git a/vendor/github.com/json-iterator/go/LICENSE b/vendor/github.com/json-iterator/go/LICENSE
deleted file mode 100644
index 2cf4f5ab..00000000
--- a/vendor/github.com/json-iterator/go/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2016 json-iterator
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md
deleted file mode 100644
index c589addf..00000000
--- a/vendor/github.com/json-iterator/go/README.md
+++ /dev/null
@@ -1,85 +0,0 @@
-[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge)
-[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/json-iterator/go)
-[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go)
-[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go)
-[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go)
-[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE)
-[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
-
-A high-performance 100% compatible drop-in replacement of "encoding/json"
-
-# Benchmark
-
-![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png)
-
-Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go
-
-Raw Result (easyjson requires static code generation)
-
-| | ns/op | allocation bytes | allocation times |
-| --------------- | ----------- | ---------------- | ---------------- |
-| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op |
-| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op |
-| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op |
-| std encode | 2213 ns/op | 712 B/op | 5 allocs/op |
-| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op |
-| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op |
-
-Always benchmark with your own workload.
-The result depends heavily on the data input.
-
-# Usage
-
-100% compatibility with standard lib
-
-Replace
-
-```go
-import "encoding/json"
-json.Marshal(&data)
-```
-
-with
-
-```go
-import jsoniter "github.com/json-iterator/go"
-
-var json = jsoniter.ConfigCompatibleWithStandardLibrary
-json.Marshal(&data)
-```
-
-Replace
-
-```go
-import "encoding/json"
-json.Unmarshal(input, &data)
-```
-
-with
-
-```go
-import jsoniter "github.com/json-iterator/go"
-
-var json = jsoniter.ConfigCompatibleWithStandardLibrary
-json.Unmarshal(input, &data)
-```
-
-[More documentation](http://jsoniter.com/migrate-from-go-std.html)
-
-# How to get
-
-```
-go get github.com/json-iterator/go
-```
-
-# Contribution Welcomed !
-
-Contributors
-
-- [thockin](https://github.com/thockin)
-- [mattn](https://github.com/mattn)
-- [cch123](https://github.com/cch123)
-- [Oleg Shaldybin](https://github.com/olegshaldybin)
-- [Jason Toffaletti](https://github.com/toffaletti)
-
-Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go
deleted file mode 100644
index 92d2cc4a..00000000
--- a/vendor/github.com/json-iterator/go/adapter.go
+++ /dev/null
@@ -1,150 +0,0 @@
-package jsoniter
-
-import (
- "bytes"
- "io"
-)
-
-// RawMessage to make replace json with jsoniter
-type RawMessage []byte
-
-// Unmarshal adapts to json/encoding Unmarshal API
-//
-// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v.
-// Refer to https://godoc.org/encoding/json#Unmarshal for more information
-func Unmarshal(data []byte, v interface{}) error {
- return ConfigDefault.Unmarshal(data, v)
-}
-
-// UnmarshalFromString is a convenient method to read from string instead of []byte
-func UnmarshalFromString(str string, v interface{}) error {
- return ConfigDefault.UnmarshalFromString(str, v)
-}
-
-// Get quick method to get value from deeply nested JSON structure
-func Get(data []byte, path ...interface{}) Any {
- return ConfigDefault.Get(data, path...)
-}
-
-// Marshal adapts to json/encoding Marshal API
-//
-// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API
-// Refer to https://godoc.org/encoding/json#Marshal for more information
-func Marshal(v interface{}) ([]byte, error) {
- return ConfigDefault.Marshal(v)
-}
-
-// MarshalIndent same as json.MarshalIndent. Prefix is not supported.
-func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
- return ConfigDefault.MarshalIndent(v, prefix, indent)
-}
-
-// MarshalToString is a convenience method that returns a string instead of []byte
-func MarshalToString(v interface{}) (string, error) {
- return ConfigDefault.MarshalToString(v)
-}
-
-// NewDecoder adapts to json/stream NewDecoder API.
-//
-// NewDecoder returns a new decoder that reads from r.
-//
-// Instead of an encoding/json Decoder, a jsoniter Decoder is returned.
-// Refer to https://godoc.org/encoding/json#NewDecoder for more information
-func NewDecoder(reader io.Reader) *Decoder {
- return ConfigDefault.NewDecoder(reader)
-}
-
-// Decoder reads and decodes JSON values from an input stream.
-// Decoder provides an API identical to encoding/json's Decoder (Token() and UseNumber() are in progress)
-type Decoder struct {
- iter *Iterator
-}
-
-// Decode decodes the next JSON value from the input into obj
-func (adapter *Decoder) Decode(obj interface{}) error {
- if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil {
- if !adapter.iter.loadMore() {
- return io.EOF
- }
- }
- adapter.iter.ReadVal(obj)
- err := adapter.iter.Error
- if err == io.EOF {
- return nil
- }
- return adapter.iter.Error
-}
-
-// More reports whether there is another element left in the input
-func (adapter *Decoder) More() bool {
- iter := adapter.iter
- if iter.Error != nil {
- return false
- }
- c := iter.nextToken()
- if c == 0 {
- return false
- }
- iter.unreadByte()
- return c != ']' && c != '}'
-}
-
-// Buffered returns a reader over the data remaining in the internal buffer
-func (adapter *Decoder) Buffered() io.Reader {
- remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail]
- return bytes.NewReader(remaining)
-}
-
-// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
-// Number instead of as a float64.
-func (adapter *Decoder) UseNumber() {
- cfg := adapter.iter.cfg.configBeforeFrozen
- cfg.UseNumber = true
- adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
-}
-
-// DisallowUnknownFields causes the Decoder to return an error when the destination
-// is a struct and the input contains object keys which do not match any
-// non-ignored, exported fields in the destination.
-func (adapter *Decoder) DisallowUnknownFields() {
- cfg := adapter.iter.cfg.configBeforeFrozen
- cfg.DisallowUnknownFields = true
- adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
-}
-
-// NewEncoder same as json.NewEncoder
-func NewEncoder(writer io.Writer) *Encoder {
- return ConfigDefault.NewEncoder(writer)
-}
-
-// Encoder same as json.Encoder
-type Encoder struct {
- stream *Stream
-}
-
-// Encode writes the JSON encoding of val to the underlying io.Writer
-func (adapter *Encoder) Encode(val interface{}) error {
- adapter.stream.WriteVal(val)
- adapter.stream.WriteRaw("\n")
- adapter.stream.Flush()
- return adapter.stream.Error
-}
-
-// SetIndent sets the indentation step. Prefix is not supported
-func (adapter *Encoder) SetIndent(prefix, indent string) {
- config := adapter.stream.cfg.configBeforeFrozen
- config.IndentionStep = len(indent)
- adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
-}
-
-// SetEscapeHTML controls HTML escaping (enabled by default); pass false to disable it
-func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) {
- config := adapter.stream.cfg.configBeforeFrozen
- config.EscapeHTML = escapeHTML
- adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
-}
-
-// Valid reports whether data is a valid JSON encoding.
-func Valid(data []byte) bool {
- return ConfigDefault.Valid(data)
-}
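The adapter above mirrors encoding/json's Decoder and Encoder. A small usage sketch, not from this patch (the `event` type and input are illustrative), streaming several whitespace-separated JSON documents with `More()` while rejecting unknown object keys:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	jsoniter "github.com/json-iterator/go"
)

type event struct {
	Kind string `json:"kind"`
}

func main() {
	in := strings.NewReader(`{"kind":"created"} {"kind":"deleted"}`)

	dec := jsoniter.NewDecoder(in)
	dec.DisallowUnknownFields() // error on keys that event does not declare

	for dec.More() {
		var e event
		if err := dec.Decode(&e); err != nil {
			log.Fatal(err)
		}
		fmt.Println(e.Kind) // created, then deleted
	}
}
```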
diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go
deleted file mode 100644
index f6b8aeab..00000000
--- a/vendor/github.com/json-iterator/go/any.go
+++ /dev/null
@@ -1,325 +0,0 @@
-package jsoniter
-
-import (
- "errors"
- "fmt"
- "github.com/modern-go/reflect2"
- "io"
- "reflect"
- "strconv"
- "unsafe"
-)
-
-// Any is a generic JSON value representation.
-// The lazy implementation holds the raw []byte and parses it on demand.
-type Any interface {
- LastError() error
- ValueType() ValueType
- MustBeValid() Any
- ToBool() bool
- ToInt() int
- ToInt32() int32
- ToInt64() int64
- ToUint() uint
- ToUint32() uint32
- ToUint64() uint64
- ToFloat32() float32
- ToFloat64() float64
- ToString() string
- ToVal(val interface{})
- Get(path ...interface{}) Any
- Size() int
- Keys() []string
- GetInterface() interface{}
- WriteTo(stream *Stream)
-}
-
-type baseAny struct{}
-
-func (any *baseAny) Get(path ...interface{}) Any {
- return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)}
-}
-
-func (any *baseAny) Size() int {
- return 0
-}
-
-func (any *baseAny) Keys() []string {
- return []string{}
-}
-
-func (any *baseAny) ToVal(obj interface{}) {
- panic("not implemented")
-}
-
-// WrapInt32 turns an int32 into the Any interface
-func WrapInt32(val int32) Any {
- return &int32Any{baseAny{}, val}
-}
-
-// WrapInt64 turns an int64 into the Any interface
-func WrapInt64(val int64) Any {
- return &int64Any{baseAny{}, val}
-}
-
-// WrapUint32 turns a uint32 into the Any interface
-func WrapUint32(val uint32) Any {
- return &uint32Any{baseAny{}, val}
-}
-
-// WrapUint64 turns a uint64 into the Any interface
-func WrapUint64(val uint64) Any {
- return &uint64Any{baseAny{}, val}
-}
-
-// WrapFloat64 turns a float64 into the Any interface
-func WrapFloat64(val float64) Any {
- return &floatAny{baseAny{}, val}
-}
-
-// WrapString turns a string into the Any interface
-func WrapString(val string) Any {
- return &stringAny{baseAny{}, val}
-}
-
-// Wrap turns a Go object into the Any interface
-func Wrap(val interface{}) Any {
- if val == nil {
- return &nilAny{}
- }
- asAny, isAny := val.(Any)
- if isAny {
- return asAny
- }
- typ := reflect2.TypeOf(val)
- switch typ.Kind() {
- case reflect.Slice:
- return wrapArray(val)
- case reflect.Struct:
- return wrapStruct(val)
- case reflect.Map:
- return wrapMap(val)
- case reflect.String:
- return WrapString(val.(string))
- case reflect.Int:
- if strconv.IntSize == 32 {
- return WrapInt32(int32(val.(int)))
- }
- return WrapInt64(int64(val.(int)))
- case reflect.Int8:
- return WrapInt32(int32(val.(int8)))
- case reflect.Int16:
- return WrapInt32(int32(val.(int16)))
- case reflect.Int32:
- return WrapInt32(val.(int32))
- case reflect.Int64:
- return WrapInt64(val.(int64))
- case reflect.Uint:
- if strconv.IntSize == 32 {
- return WrapUint32(uint32(val.(uint)))
- }
- return WrapUint64(uint64(val.(uint)))
- case reflect.Uintptr:
- if ptrSize == 32 {
- return WrapUint32(uint32(val.(uintptr)))
- }
- return WrapUint64(uint64(val.(uintptr)))
- case reflect.Uint8:
- return WrapUint32(uint32(val.(uint8)))
- case reflect.Uint16:
- return WrapUint32(uint32(val.(uint16)))
- case reflect.Uint32:
- return WrapUint32(uint32(val.(uint32)))
- case reflect.Uint64:
- return WrapUint64(val.(uint64))
- case reflect.Float32:
- return WrapFloat64(float64(val.(float32)))
- case reflect.Float64:
- return WrapFloat64(val.(float64))
- case reflect.Bool:
- if val.(bool) == true {
- return &trueAny{}
- }
- return &falseAny{}
- }
- return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)}
-}
-
-// ReadAny reads the next JSON element as an Any object. It is a better json.RawMessage.
-func (iter *Iterator) ReadAny() Any {
- return iter.readAny()
-}
-
-func (iter *Iterator) readAny() Any {
- c := iter.nextToken()
- switch c {
- case '"':
- iter.unreadByte()
- return &stringAny{baseAny{}, iter.ReadString()}
- case 'n':
- iter.skipThreeBytes('u', 'l', 'l') // null
- return &nilAny{}
- case 't':
- iter.skipThreeBytes('r', 'u', 'e') // true
- return &trueAny{}
- case 'f':
- iter.skipFourBytes('a', 'l', 's', 'e') // false
- return &falseAny{}
- case '{':
- return iter.readObjectAny()
- case '[':
- return iter.readArrayAny()
- case '-':
- return iter.readNumberAny(false)
- case 0:
- return &invalidAny{baseAny{}, errors.New("input is empty")}
- default:
- return iter.readNumberAny(true)
- }
-}
-
-func (iter *Iterator) readNumberAny(positive bool) Any {
- iter.startCapture(iter.head - 1)
- iter.skipNumber()
- lazyBuf := iter.stopCapture()
- return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
-}
-
-func (iter *Iterator) readObjectAny() Any {
- iter.startCapture(iter.head - 1)
- iter.skipObject()
- lazyBuf := iter.stopCapture()
- return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
-}
-
-func (iter *Iterator) readArrayAny() Any {
- iter.startCapture(iter.head - 1)
- iter.skipArray()
- lazyBuf := iter.stopCapture()
- return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
-}
-
-func locateObjectField(iter *Iterator, target string) []byte {
- var found []byte
- iter.ReadObjectCB(func(iter *Iterator, field string) bool {
- if field == target {
- found = iter.SkipAndReturnBytes()
- return false
- }
- iter.Skip()
- return true
- })
- return found
-}
-
-func locateArrayElement(iter *Iterator, target int) []byte {
- var found []byte
- n := 0
- iter.ReadArrayCB(func(iter *Iterator) bool {
- if n == target {
- found = iter.SkipAndReturnBytes()
- return false
- }
- iter.Skip()
- n++
- return true
- })
- return found
-}
-
-func locatePath(iter *Iterator, path []interface{}) Any {
- for i, pathKeyObj := range path {
- switch pathKey := pathKeyObj.(type) {
- case string:
- valueBytes := locateObjectField(iter, pathKey)
- if valueBytes == nil {
- return newInvalidAny(path[i:])
- }
- iter.ResetBytes(valueBytes)
- case int:
- valueBytes := locateArrayElement(iter, pathKey)
- if valueBytes == nil {
- return newInvalidAny(path[i:])
- }
- iter.ResetBytes(valueBytes)
- case int32:
- if '*' == pathKey {
- return iter.readAny().Get(path[i:]...)
- }
- return newInvalidAny(path[i:])
- default:
- return newInvalidAny(path[i:])
- }
- }
- if iter.Error != nil && iter.Error != io.EOF {
- return &invalidAny{baseAny{}, iter.Error}
- }
- return iter.readAny()
-}
-
-var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem()
-
-func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder {
- if typ == anyType {
- return &directAnyCodec{}
- }
- if typ.Implements(anyType) {
- return &anyCodec{
- valType: typ,
- }
- }
- return nil
-}
-
-func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder {
- if typ == anyType {
- return &directAnyCodec{}
- }
- if typ.Implements(anyType) {
- return &anyCodec{
- valType: typ,
- }
- }
- return nil
-}
-
-type anyCodec struct {
- valType reflect2.Type
-}
-
-func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- panic("not implemented")
-}
-
-func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
- obj := codec.valType.UnsafeIndirect(ptr)
- any := obj.(Any)
- any.WriteTo(stream)
-}
-
-func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool {
- obj := codec.valType.UnsafeIndirect(ptr)
- any := obj.(Any)
- return any.Size() == 0
-}
-
-type directAnyCodec struct {
-}
-
-func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- *(*Any)(ptr) = iter.readAny()
-}
-
-func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
- any := *(*Any)(ptr)
- if any == nil {
- stream.WriteNil()
- return
- }
- any.WriteTo(stream)
-}
-
-func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool {
- any := *(*Any)(ptr)
- return any.Size() == 0
-}
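The Any type above backs `jsoniter.Get`, which lazily walks a path of object keys and array indexes. A hedged sketch, not part of the patch (the sample document is made up):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	data := []byte(`{"users":[{"name":"alice","age":30},{"name":"bob","age":25}]}`)

	// Path elements are object keys (string) and array indexes (int).
	name := jsoniter.Get(data, "users", 0, "name").ToString()
	age := jsoniter.Get(data, "users", 1, "age").ToInt()
	fmt.Println(name, age) // alice 25

	// A missing path yields an invalid Any rather than a panic.
	missing := jsoniter.Get(data, "users", 5, "name")
	fmt.Println(missing.ValueType() == jsoniter.InvalidValue, missing.LastError())
}
```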
diff --git a/vendor/github.com/json-iterator/go/any_array.go b/vendor/github.com/json-iterator/go/any_array.go
deleted file mode 100644
index 0449e9aa..00000000
--- a/vendor/github.com/json-iterator/go/any_array.go
+++ /dev/null
@@ -1,278 +0,0 @@
-package jsoniter
-
-import (
- "reflect"
- "unsafe"
-)
-
-type arrayLazyAny struct {
- baseAny
- cfg *frozenConfig
- buf []byte
- err error
-}
-
-func (any *arrayLazyAny) ValueType() ValueType {
- return ArrayValue
-}
-
-func (any *arrayLazyAny) MustBeValid() Any {
- return any
-}
-
-func (any *arrayLazyAny) LastError() error {
- return any.err
-}
-
-func (any *arrayLazyAny) ToBool() bool {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- return iter.ReadArray()
-}
-
-func (any *arrayLazyAny) ToInt() int {
- if any.ToBool() {
- return 1
- }
- return 0
-}
-
-func (any *arrayLazyAny) ToInt32() int32 {
- if any.ToBool() {
- return 1
- }
- return 0
-}
-
-func (any *arrayLazyAny) ToInt64() int64 {
- if any.ToBool() {
- return 1
- }
- return 0
-}
-
-func (any *arrayLazyAny) ToUint() uint {
- if any.ToBool() {
- return 1
- }
- return 0
-}
-
-func (any *arrayLazyAny) ToUint32() uint32 {
- if any.ToBool() {
- return 1
- }
- return 0
-}
-
-func (any *arrayLazyAny) ToUint64() uint64 {
- if any.ToBool() {
- return 1
- }
- return 0
-}
-
-func (any *arrayLazyAny) ToFloat32() float32 {
- if any.ToBool() {
- return 1
- }
- return 0
-}
-
-func (any *arrayLazyAny) ToFloat64() float64 {
- if any.ToBool() {
- return 1
- }
- return 0
-}
-
-func (any *arrayLazyAny) ToString() string {
- return *(*string)(unsafe.Pointer(&any.buf))
-}
-
-func (any *arrayLazyAny) ToVal(val interface{}) {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- iter.ReadVal(val)
-}
-
-func (any *arrayLazyAny) Get(path ...interface{}) Any {
- if len(path) == 0 {
- return any
- }
- switch firstPath := path[0].(type) {
- case int:
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- valueBytes := locateArrayElement(iter, firstPath)
- if valueBytes == nil {
- return newInvalidAny(path)
- }
- iter.ResetBytes(valueBytes)
- return locatePath(iter, path[1:])
- case int32:
- if '*' == firstPath {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- arr := make([]Any, 0)
- iter.ReadArrayCB(func(iter *Iterator) bool {
- found := iter.readAny().Get(path[1:]...)
- if found.ValueType() != InvalidValue {
- arr = append(arr, found)
- }
- return true
- })
- return wrapArray(arr)
- }
- return newInvalidAny(path)
- default:
- return newInvalidAny(path)
- }
-}
-
-func (any *arrayLazyAny) Size() int {
- size := 0
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- iter.ReadArrayCB(func(iter *Iterator) bool {
- size++
- iter.Skip()
- return true
- })
- return size
-}
-
-func (any *arrayLazyAny) WriteTo(stream *Stream) {
- stream.Write(any.buf)
-}
-
-func (any *arrayLazyAny) GetInterface() interface{} {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- return iter.Read()
-}
-
-type arrayAny struct {
- baseAny
- val reflect.Value
-}
-
-func wrapArray(val interface{}) *arrayAny {
- return &arrayAny{baseAny{}, reflect.ValueOf(val)}
-}
-
-func (any *arrayAny) ValueType() ValueType {
- return ArrayValue
-}
-
-func (any *arrayAny) MustBeValid() Any {
- return any
-}
-
-func (any *arrayAny) LastError() error {
- return nil
-}
-
-func (any *arrayAny) ToBool() bool {
- return any.val.Len() != 0
-}
-
-func (any *arrayAny) ToInt() int {
- if any.val.Len() == 0 {
- return 0
- }
- return 1
-}
-
-func (any *arrayAny) ToInt32() int32 {
- if any.val.Len() == 0 {
- return 0
- }
- return 1
-}
-
-func (any *arrayAny) ToInt64() int64 {
- if any.val.Len() == 0 {
- return 0
- }
- return 1
-}
-
-func (any *arrayAny) ToUint() uint {
- if any.val.Len() == 0 {
- return 0
- }
- return 1
-}
-
-func (any *arrayAny) ToUint32() uint32 {
- if any.val.Len() == 0 {
- return 0
- }
- return 1
-}
-
-func (any *arrayAny) ToUint64() uint64 {
- if any.val.Len() == 0 {
- return 0
- }
- return 1
-}
-
-func (any *arrayAny) ToFloat32() float32 {
- if any.val.Len() == 0 {
- return 0
- }
- return 1
-}
-
-func (any *arrayAny) ToFloat64() float64 {
- if any.val.Len() == 0 {
- return 0
- }
- return 1
-}
-
-func (any *arrayAny) ToString() string {
- str, _ := MarshalToString(any.val.Interface())
- return str
-}
-
-func (any *arrayAny) Get(path ...interface{}) Any {
- if len(path) == 0 {
- return any
- }
- switch firstPath := path[0].(type) {
- case int:
- if firstPath < 0 || firstPath >= any.val.Len() {
- return newInvalidAny(path)
- }
- return Wrap(any.val.Index(firstPath).Interface())
- case int32:
- if '*' == firstPath {
- mappedAll := make([]Any, 0)
- for i := 0; i < any.val.Len(); i++ {
- mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...)
- if mapped.ValueType() != InvalidValue {
- mappedAll = append(mappedAll, mapped)
- }
- }
- return wrapArray(mappedAll)
- }
- return newInvalidAny(path)
- default:
- return newInvalidAny(path)
- }
-}
-
-func (any *arrayAny) Size() int {
- return any.val.Len()
-}
-
-func (any *arrayAny) WriteTo(stream *Stream) {
- stream.WriteVal(any.val)
-}
-
-func (any *arrayAny) GetInterface() interface{} {
- return any.val.Interface()
-}
diff --git a/vendor/github.com/json-iterator/go/any_bool.go b/vendor/github.com/json-iterator/go/any_bool.go
deleted file mode 100644
index 9452324a..00000000
--- a/vendor/github.com/json-iterator/go/any_bool.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package jsoniter
-
-type trueAny struct {
- baseAny
-}
-
-func (any *trueAny) LastError() error {
- return nil
-}
-
-func (any *trueAny) ToBool() bool {
- return true
-}
-
-func (any *trueAny) ToInt() int {
- return 1
-}
-
-func (any *trueAny) ToInt32() int32 {
- return 1
-}
-
-func (any *trueAny) ToInt64() int64 {
- return 1
-}
-
-func (any *trueAny) ToUint() uint {
- return 1
-}
-
-func (any *trueAny) ToUint32() uint32 {
- return 1
-}
-
-func (any *trueAny) ToUint64() uint64 {
- return 1
-}
-
-func (any *trueAny) ToFloat32() float32 {
- return 1
-}
-
-func (any *trueAny) ToFloat64() float64 {
- return 1
-}
-
-func (any *trueAny) ToString() string {
- return "true"
-}
-
-func (any *trueAny) WriteTo(stream *Stream) {
- stream.WriteTrue()
-}
-
-func (any *trueAny) Parse() *Iterator {
- return nil
-}
-
-func (any *trueAny) GetInterface() interface{} {
- return true
-}
-
-func (any *trueAny) ValueType() ValueType {
- return BoolValue
-}
-
-func (any *trueAny) MustBeValid() Any {
- return any
-}
-
-type falseAny struct {
- baseAny
-}
-
-func (any *falseAny) LastError() error {
- return nil
-}
-
-func (any *falseAny) ToBool() bool {
- return false
-}
-
-func (any *falseAny) ToInt() int {
- return 0
-}
-
-func (any *falseAny) ToInt32() int32 {
- return 0
-}
-
-func (any *falseAny) ToInt64() int64 {
- return 0
-}
-
-func (any *falseAny) ToUint() uint {
- return 0
-}
-
-func (any *falseAny) ToUint32() uint32 {
- return 0
-}
-
-func (any *falseAny) ToUint64() uint64 {
- return 0
-}
-
-func (any *falseAny) ToFloat32() float32 {
- return 0
-}
-
-func (any *falseAny) ToFloat64() float64 {
- return 0
-}
-
-func (any *falseAny) ToString() string {
- return "false"
-}
-
-func (any *falseAny) WriteTo(stream *Stream) {
- stream.WriteFalse()
-}
-
-func (any *falseAny) Parse() *Iterator {
- return nil
-}
-
-func (any *falseAny) GetInterface() interface{} {
- return false
-}
-
-func (any *falseAny) ValueType() ValueType {
- return BoolValue
-}
-
-func (any *falseAny) MustBeValid() Any {
- return any
-}
diff --git a/vendor/github.com/json-iterator/go/any_float.go b/vendor/github.com/json-iterator/go/any_float.go
deleted file mode 100644
index 35fdb094..00000000
--- a/vendor/github.com/json-iterator/go/any_float.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package jsoniter
-
-import (
- "strconv"
-)
-
-type floatAny struct {
- baseAny
- val float64
-}
-
-func (any *floatAny) Parse() *Iterator {
- return nil
-}
-
-func (any *floatAny) ValueType() ValueType {
- return NumberValue
-}
-
-func (any *floatAny) MustBeValid() Any {
- return any
-}
-
-func (any *floatAny) LastError() error {
- return nil
-}
-
-func (any *floatAny) ToBool() bool {
- return any.ToFloat64() != 0
-}
-
-func (any *floatAny) ToInt() int {
- return int(any.val)
-}
-
-func (any *floatAny) ToInt32() int32 {
- return int32(any.val)
-}
-
-func (any *floatAny) ToInt64() int64 {
- return int64(any.val)
-}
-
-func (any *floatAny) ToUint() uint {
- if any.val > 0 {
- return uint(any.val)
- }
- return 0
-}
-
-func (any *floatAny) ToUint32() uint32 {
- if any.val > 0 {
- return uint32(any.val)
- }
- return 0
-}
-
-func (any *floatAny) ToUint64() uint64 {
- if any.val > 0 {
- return uint64(any.val)
- }
- return 0
-}
-
-func (any *floatAny) ToFloat32() float32 {
- return float32(any.val)
-}
-
-func (any *floatAny) ToFloat64() float64 {
- return any.val
-}
-
-func (any *floatAny) ToString() string {
- return strconv.FormatFloat(any.val, 'E', -1, 64)
-}
-
-func (any *floatAny) WriteTo(stream *Stream) {
- stream.WriteFloat64(any.val)
-}
-
-func (any *floatAny) GetInterface() interface{} {
- return any.val
-}
diff --git a/vendor/github.com/json-iterator/go/any_int32.go b/vendor/github.com/json-iterator/go/any_int32.go
deleted file mode 100644
index 1b56f399..00000000
--- a/vendor/github.com/json-iterator/go/any_int32.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package jsoniter
-
-import (
- "strconv"
-)
-
-type int32Any struct {
- baseAny
- val int32
-}
-
-func (any *int32Any) LastError() error {
- return nil
-}
-
-func (any *int32Any) ValueType() ValueType {
- return NumberValue
-}
-
-func (any *int32Any) MustBeValid() Any {
- return any
-}
-
-func (any *int32Any) ToBool() bool {
- return any.val != 0
-}
-
-func (any *int32Any) ToInt() int {
- return int(any.val)
-}
-
-func (any *int32Any) ToInt32() int32 {
- return any.val
-}
-
-func (any *int32Any) ToInt64() int64 {
- return int64(any.val)
-}
-
-func (any *int32Any) ToUint() uint {
- return uint(any.val)
-}
-
-func (any *int32Any) ToUint32() uint32 {
- return uint32(any.val)
-}
-
-func (any *int32Any) ToUint64() uint64 {
- return uint64(any.val)
-}
-
-func (any *int32Any) ToFloat32() float32 {
- return float32(any.val)
-}
-
-func (any *int32Any) ToFloat64() float64 {
- return float64(any.val)
-}
-
-func (any *int32Any) ToString() string {
- return strconv.FormatInt(int64(any.val), 10)
-}
-
-func (any *int32Any) WriteTo(stream *Stream) {
- stream.WriteInt32(any.val)
-}
-
-func (any *int32Any) Parse() *Iterator {
- return nil
-}
-
-func (any *int32Any) GetInterface() interface{} {
- return any.val
-}
diff --git a/vendor/github.com/json-iterator/go/any_int64.go b/vendor/github.com/json-iterator/go/any_int64.go
deleted file mode 100644
index c440d72b..00000000
--- a/vendor/github.com/json-iterator/go/any_int64.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package jsoniter
-
-import (
- "strconv"
-)
-
-type int64Any struct {
- baseAny
- val int64
-}
-
-func (any *int64Any) LastError() error {
- return nil
-}
-
-func (any *int64Any) ValueType() ValueType {
- return NumberValue
-}
-
-func (any *int64Any) MustBeValid() Any {
- return any
-}
-
-func (any *int64Any) ToBool() bool {
- return any.val != 0
-}
-
-func (any *int64Any) ToInt() int {
- return int(any.val)
-}
-
-func (any *int64Any) ToInt32() int32 {
- return int32(any.val)
-}
-
-func (any *int64Any) ToInt64() int64 {
- return any.val
-}
-
-func (any *int64Any) ToUint() uint {
- return uint(any.val)
-}
-
-func (any *int64Any) ToUint32() uint32 {
- return uint32(any.val)
-}
-
-func (any *int64Any) ToUint64() uint64 {
- return uint64(any.val)
-}
-
-func (any *int64Any) ToFloat32() float32 {
- return float32(any.val)
-}
-
-func (any *int64Any) ToFloat64() float64 {
- return float64(any.val)
-}
-
-func (any *int64Any) ToString() string {
- return strconv.FormatInt(any.val, 10)
-}
-
-func (any *int64Any) WriteTo(stream *Stream) {
- stream.WriteInt64(any.val)
-}
-
-func (any *int64Any) Parse() *Iterator {
- return nil
-}
-
-func (any *int64Any) GetInterface() interface{} {
- return any.val
-}
diff --git a/vendor/github.com/json-iterator/go/any_invalid.go b/vendor/github.com/json-iterator/go/any_invalid.go
deleted file mode 100644
index 1d859eac..00000000
--- a/vendor/github.com/json-iterator/go/any_invalid.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package jsoniter
-
-import "fmt"
-
-type invalidAny struct {
- baseAny
- err error
-}
-
-func newInvalidAny(path []interface{}) *invalidAny {
- return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)}
-}
-
-func (any *invalidAny) LastError() error {
- return any.err
-}
-
-func (any *invalidAny) ValueType() ValueType {
- return InvalidValue
-}
-
-func (any *invalidAny) MustBeValid() Any {
- panic(any.err)
-}
-
-func (any *invalidAny) ToBool() bool {
- return false
-}
-
-func (any *invalidAny) ToInt() int {
- return 0
-}
-
-func (any *invalidAny) ToInt32() int32 {
- return 0
-}
-
-func (any *invalidAny) ToInt64() int64 {
- return 0
-}
-
-func (any *invalidAny) ToUint() uint {
- return 0
-}
-
-func (any *invalidAny) ToUint32() uint32 {
- return 0
-}
-
-func (any *invalidAny) ToUint64() uint64 {
- return 0
-}
-
-func (any *invalidAny) ToFloat32() float32 {
- return 0
-}
-
-func (any *invalidAny) ToFloat64() float64 {
- return 0
-}
-
-func (any *invalidAny) ToString() string {
- return ""
-}
-
-func (any *invalidAny) WriteTo(stream *Stream) {
-}
-
-func (any *invalidAny) Get(path ...interface{}) Any {
- if any.err == nil {
- return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)}
- }
- return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)}
-}
-
-func (any *invalidAny) Parse() *Iterator {
- return nil
-}
-
-func (any *invalidAny) GetInterface() interface{} {
- return nil
-}
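As the invalid Any above shows, a failed lookup stays usable: conversions return zero values, the failure is reported by LastError, and MustBeValid escalates it to a panic. A small sketch, not part of the patch:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	v := jsoniter.Get([]byte(`{"a":1}`), "b") // key "b" does not exist

	fmt.Println(v.ValueType() == jsoniter.InvalidValue) // true
	fmt.Println(v.ToInt(), v.ToString() == "")          // 0 true
	fmt.Println(v.LastError())                          // describes the missing path

	defer func() { fmt.Println("recovered:", recover()) }()
	v.MustBeValid() // panics with the lookup error
}
```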
diff --git a/vendor/github.com/json-iterator/go/any_nil.go b/vendor/github.com/json-iterator/go/any_nil.go
deleted file mode 100644
index d04cb54c..00000000
--- a/vendor/github.com/json-iterator/go/any_nil.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package jsoniter
-
-type nilAny struct {
- baseAny
-}
-
-func (any *nilAny) LastError() error {
- return nil
-}
-
-func (any *nilAny) ValueType() ValueType {
- return NilValue
-}
-
-func (any *nilAny) MustBeValid() Any {
- return any
-}
-
-func (any *nilAny) ToBool() bool {
- return false
-}
-
-func (any *nilAny) ToInt() int {
- return 0
-}
-
-func (any *nilAny) ToInt32() int32 {
- return 0
-}
-
-func (any *nilAny) ToInt64() int64 {
- return 0
-}
-
-func (any *nilAny) ToUint() uint {
- return 0
-}
-
-func (any *nilAny) ToUint32() uint32 {
- return 0
-}
-
-func (any *nilAny) ToUint64() uint64 {
- return 0
-}
-
-func (any *nilAny) ToFloat32() float32 {
- return 0
-}
-
-func (any *nilAny) ToFloat64() float64 {
- return 0
-}
-
-func (any *nilAny) ToString() string {
- return ""
-}
-
-func (any *nilAny) WriteTo(stream *Stream) {
- stream.WriteNil()
-}
-
-func (any *nilAny) Parse() *Iterator {
- return nil
-}
-
-func (any *nilAny) GetInterface() interface{} {
- return nil
-}
diff --git a/vendor/github.com/json-iterator/go/any_number.go b/vendor/github.com/json-iterator/go/any_number.go
deleted file mode 100644
index 9d1e901a..00000000
--- a/vendor/github.com/json-iterator/go/any_number.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package jsoniter
-
-import (
- "io"
- "unsafe"
-)
-
-type numberLazyAny struct {
- baseAny
- cfg *frozenConfig
- buf []byte
- err error
-}
-
-func (any *numberLazyAny) ValueType() ValueType {
- return NumberValue
-}
-
-func (any *numberLazyAny) MustBeValid() Any {
- return any
-}
-
-func (any *numberLazyAny) LastError() error {
- return any.err
-}
-
-func (any *numberLazyAny) ToBool() bool {
- return any.ToFloat64() != 0
-}
-
-func (any *numberLazyAny) ToInt() int {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- val := iter.ReadInt()
- if iter.Error != nil && iter.Error != io.EOF {
- any.err = iter.Error
- }
- return val
-}
-
-func (any *numberLazyAny) ToInt32() int32 {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- val := iter.ReadInt32()
- if iter.Error != nil && iter.Error != io.EOF {
- any.err = iter.Error
- }
- return val
-}
-
-func (any *numberLazyAny) ToInt64() int64 {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- val := iter.ReadInt64()
- if iter.Error != nil && iter.Error != io.EOF {
- any.err = iter.Error
- }
- return val
-}
-
-func (any *numberLazyAny) ToUint() uint {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- val := iter.ReadUint()
- if iter.Error != nil && iter.Error != io.EOF {
- any.err = iter.Error
- }
- return val
-}
-
-func (any *numberLazyAny) ToUint32() uint32 {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- val := iter.ReadUint32()
- if iter.Error != nil && iter.Error != io.EOF {
- any.err = iter.Error
- }
- return val
-}
-
-func (any *numberLazyAny) ToUint64() uint64 {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- val := iter.ReadUint64()
- if iter.Error != nil && iter.Error != io.EOF {
- any.err = iter.Error
- }
- return val
-}
-
-func (any *numberLazyAny) ToFloat32() float32 {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- val := iter.ReadFloat32()
- if iter.Error != nil && iter.Error != io.EOF {
- any.err = iter.Error
- }
- return val
-}
-
-func (any *numberLazyAny) ToFloat64() float64 {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- val := iter.ReadFloat64()
- if iter.Error != nil && iter.Error != io.EOF {
- any.err = iter.Error
- }
- return val
-}
-
-func (any *numberLazyAny) ToString() string {
- return *(*string)(unsafe.Pointer(&any.buf))
-}
-
-func (any *numberLazyAny) WriteTo(stream *Stream) {
- stream.Write(any.buf)
-}
-
-func (any *numberLazyAny) GetInterface() interface{} {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- return iter.Read()
-}
diff --git a/vendor/github.com/json-iterator/go/any_object.go b/vendor/github.com/json-iterator/go/any_object.go
deleted file mode 100644
index c44ef5c9..00000000
--- a/vendor/github.com/json-iterator/go/any_object.go
+++ /dev/null
@@ -1,374 +0,0 @@
-package jsoniter
-
-import (
- "reflect"
- "unsafe"
-)
-
-type objectLazyAny struct {
- baseAny
- cfg *frozenConfig
- buf []byte
- err error
-}
-
-func (any *objectLazyAny) ValueType() ValueType {
- return ObjectValue
-}
-
-func (any *objectLazyAny) MustBeValid() Any {
- return any
-}
-
-func (any *objectLazyAny) LastError() error {
- return any.err
-}
-
-func (any *objectLazyAny) ToBool() bool {
- return true
-}
-
-func (any *objectLazyAny) ToInt() int {
- return 0
-}
-
-func (any *objectLazyAny) ToInt32() int32 {
- return 0
-}
-
-func (any *objectLazyAny) ToInt64() int64 {
- return 0
-}
-
-func (any *objectLazyAny) ToUint() uint {
- return 0
-}
-
-func (any *objectLazyAny) ToUint32() uint32 {
- return 0
-}
-
-func (any *objectLazyAny) ToUint64() uint64 {
- return 0
-}
-
-func (any *objectLazyAny) ToFloat32() float32 {
- return 0
-}
-
-func (any *objectLazyAny) ToFloat64() float64 {
- return 0
-}
-
-func (any *objectLazyAny) ToString() string {
- return *(*string)(unsafe.Pointer(&any.buf))
-}
-
-func (any *objectLazyAny) ToVal(obj interface{}) {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- iter.ReadVal(obj)
-}
-
-func (any *objectLazyAny) Get(path ...interface{}) Any {
- if len(path) == 0 {
- return any
- }
- switch firstPath := path[0].(type) {
- case string:
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- valueBytes := locateObjectField(iter, firstPath)
- if valueBytes == nil {
- return newInvalidAny(path)
- }
- iter.ResetBytes(valueBytes)
- return locatePath(iter, path[1:])
- case int32:
- if '*' == firstPath {
- mappedAll := map[string]Any{}
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- iter.ReadMapCB(func(iter *Iterator, field string) bool {
- mapped := locatePath(iter, path[1:])
- if mapped.ValueType() != InvalidValue {
- mappedAll[field] = mapped
- }
- return true
- })
- return wrapMap(mappedAll)
- }
- return newInvalidAny(path)
- default:
- return newInvalidAny(path)
- }
-}
-
-func (any *objectLazyAny) Keys() []string {
- keys := []string{}
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- iter.ReadMapCB(func(iter *Iterator, field string) bool {
- iter.Skip()
- keys = append(keys, field)
- return true
- })
- return keys
-}
-
-func (any *objectLazyAny) Size() int {
- size := 0
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- iter.ReadObjectCB(func(iter *Iterator, field string) bool {
- iter.Skip()
- size++
- return true
- })
- return size
-}
-
-func (any *objectLazyAny) WriteTo(stream *Stream) {
- stream.Write(any.buf)
-}
-
-func (any *objectLazyAny) GetInterface() interface{} {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- return iter.Read()
-}
-
-type objectAny struct {
- baseAny
- err error
- val reflect.Value
-}
-
-func wrapStruct(val interface{}) *objectAny {
- return &objectAny{baseAny{}, nil, reflect.ValueOf(val)}
-}
-
-func (any *objectAny) ValueType() ValueType {
- return ObjectValue
-}
-
-func (any *objectAny) MustBeValid() Any {
- return any
-}
-
-func (any *objectAny) Parse() *Iterator {
- return nil
-}
-
-func (any *objectAny) LastError() error {
- return any.err
-}
-
-func (any *objectAny) ToBool() bool {
- return any.val.NumField() != 0
-}
-
-func (any *objectAny) ToInt() int {
- return 0
-}
-
-func (any *objectAny) ToInt32() int32 {
- return 0
-}
-
-func (any *objectAny) ToInt64() int64 {
- return 0
-}
-
-func (any *objectAny) ToUint() uint {
- return 0
-}
-
-func (any *objectAny) ToUint32() uint32 {
- return 0
-}
-
-func (any *objectAny) ToUint64() uint64 {
- return 0
-}
-
-func (any *objectAny) ToFloat32() float32 {
- return 0
-}
-
-func (any *objectAny) ToFloat64() float64 {
- return 0
-}
-
-func (any *objectAny) ToString() string {
- str, err := MarshalToString(any.val.Interface())
- any.err = err
- return str
-}
-
-func (any *objectAny) Get(path ...interface{}) Any {
- if len(path) == 0 {
- return any
- }
- switch firstPath := path[0].(type) {
- case string:
- field := any.val.FieldByName(firstPath)
- if !field.IsValid() {
- return newInvalidAny(path)
- }
- return Wrap(field.Interface())
- case int32:
- if '*' == firstPath {
- mappedAll := map[string]Any{}
- for i := 0; i < any.val.NumField(); i++ {
- field := any.val.Field(i)
- if field.CanInterface() {
- mapped := Wrap(field.Interface()).Get(path[1:]...)
- if mapped.ValueType() != InvalidValue {
- mappedAll[any.val.Type().Field(i).Name] = mapped
- }
- }
- }
- return wrapMap(mappedAll)
- }
- return newInvalidAny(path)
- default:
- return newInvalidAny(path)
- }
-}
-
-func (any *objectAny) Keys() []string {
- keys := make([]string, 0, any.val.NumField())
- for i := 0; i < any.val.NumField(); i++ {
- keys = append(keys, any.val.Type().Field(i).Name)
- }
- return keys
-}
-
-func (any *objectAny) Size() int {
- return any.val.NumField()
-}
-
-func (any *objectAny) WriteTo(stream *Stream) {
- stream.WriteVal(any.val)
-}
-
-func (any *objectAny) GetInterface() interface{} {
- return any.val.Interface()
-}
-
-type mapAny struct {
- baseAny
- err error
- val reflect.Value
-}
-
-func wrapMap(val interface{}) *mapAny {
- return &mapAny{baseAny{}, nil, reflect.ValueOf(val)}
-}
-
-func (any *mapAny) ValueType() ValueType {
- return ObjectValue
-}
-
-func (any *mapAny) MustBeValid() Any {
- return any
-}
-
-func (any *mapAny) Parse() *Iterator {
- return nil
-}
-
-func (any *mapAny) LastError() error {
- return any.err
-}
-
-func (any *mapAny) ToBool() bool {
- return true
-}
-
-func (any *mapAny) ToInt() int {
- return 0
-}
-
-func (any *mapAny) ToInt32() int32 {
- return 0
-}
-
-func (any *mapAny) ToInt64() int64 {
- return 0
-}
-
-func (any *mapAny) ToUint() uint {
- return 0
-}
-
-func (any *mapAny) ToUint32() uint32 {
- return 0
-}
-
-func (any *mapAny) ToUint64() uint64 {
- return 0
-}
-
-func (any *mapAny) ToFloat32() float32 {
- return 0
-}
-
-func (any *mapAny) ToFloat64() float64 {
- return 0
-}
-
-func (any *mapAny) ToString() string {
- str, err := MarshalToString(any.val.Interface())
- any.err = err
- return str
-}
-
-func (any *mapAny) Get(path ...interface{}) Any {
- if len(path) == 0 {
- return any
- }
- switch firstPath := path[0].(type) {
- case int32:
- if '*' == firstPath {
- mappedAll := map[string]Any{}
- for _, key := range any.val.MapKeys() {
- keyAsStr := key.String()
- element := Wrap(any.val.MapIndex(key).Interface())
- mapped := element.Get(path[1:]...)
- if mapped.ValueType() != InvalidValue {
- mappedAll[keyAsStr] = mapped
- }
- }
- return wrapMap(mappedAll)
- }
- return newInvalidAny(path)
- default:
- value := any.val.MapIndex(reflect.ValueOf(firstPath))
- if !value.IsValid() {
- return newInvalidAny(path)
- }
- return Wrap(value.Interface())
- }
-}
-
-func (any *mapAny) Keys() []string {
- keys := make([]string, 0, any.val.Len())
- for _, key := range any.val.MapKeys() {
- keys = append(keys, key.String())
- }
- return keys
-}
-
-func (any *mapAny) Size() int {
- return any.val.Len()
-}
-
-func (any *mapAny) WriteTo(stream *Stream) {
- stream.WriteVal(any.val)
-}
-
-func (any *mapAny) GetInterface() interface{} {
- return any.val.Interface()
-}
diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go
deleted file mode 100644
index 1f12f661..00000000
--- a/vendor/github.com/json-iterator/go/any_str.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package jsoniter
-
-import (
- "fmt"
- "strconv"
-)
-
-type stringAny struct {
- baseAny
- val string
-}
-
-func (any *stringAny) Get(path ...interface{}) Any {
- if len(path) == 0 {
- return any
- }
- return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)}
-}
-
-func (any *stringAny) Parse() *Iterator {
- return nil
-}
-
-func (any *stringAny) ValueType() ValueType {
- return StringValue
-}
-
-func (any *stringAny) MustBeValid() Any {
- return any
-}
-
-func (any *stringAny) LastError() error {
- return nil
-}
-
-func (any *stringAny) ToBool() bool {
- str := any.ToString()
- if str == "0" {
- return false
- }
- for _, c := range str {
- switch c {
- case ' ', '\n', '\r', '\t':
- default:
- return true
- }
- }
- return false
-}
-
-func (any *stringAny) ToInt() int {
- return int(any.ToInt64())
-
-}
-
-func (any *stringAny) ToInt32() int32 {
- return int32(any.ToInt64())
-}
-
-func (any *stringAny) ToInt64() int64 {
- if any.val == "" {
- return 0
- }
-
- flag := 1
- startPos := 0
- if any.val[0] == '+' || any.val[0] == '-' {
- startPos = 1
- }
-
- if any.val[0] == '-' {
- flag = -1
- }
-
- endPos := startPos
- for i := startPos; i < len(any.val); i++ {
- if any.val[i] >= '0' && any.val[i] <= '9' {
- endPos = i + 1
- } else {
- break
- }
- }
- parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64)
- return int64(flag) * parsed
-}
-
-func (any *stringAny) ToUint() uint {
- return uint(any.ToUint64())
-}
-
-func (any *stringAny) ToUint32() uint32 {
- return uint32(any.ToUint64())
-}
-
-func (any *stringAny) ToUint64() uint64 {
- if any.val == "" {
- return 0
- }
-
- startPos := 0
-
- if any.val[0] == '-' {
- return 0
- }
- if any.val[0] == '+' {
- startPos = 1
- }
-
- endPos := startPos
- for i := startPos; i < len(any.val); i++ {
- if any.val[i] >= '0' && any.val[i] <= '9' {
- endPos = i + 1
- } else {
- break
- }
- }
- parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64)
- return parsed
-}
-
-func (any *stringAny) ToFloat32() float32 {
- return float32(any.ToFloat64())
-}
-
-func (any *stringAny) ToFloat64() float64 {
- if len(any.val) == 0 {
- return 0
- }
-
- // first char invalid
- if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') {
- return 0
- }
-
- // extract valid num expression from string
- // eg 123true => 123, -12.12xxa => -12.12
- endPos := 1
- for i := 1; i < len(any.val); i++ {
- if any.val[i] == '.' || any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' {
- endPos = i + 1
- continue
- }
-
- // end position is the first char which is not digit
- if any.val[i] >= '0' && any.val[i] <= '9' {
- endPos = i + 1
- } else {
- endPos = i
- break
- }
- }
- parsed, _ := strconv.ParseFloat(any.val[:endPos], 64)
- return parsed
-}
-
-func (any *stringAny) ToString() string {
- return any.val
-}
-
-func (any *stringAny) WriteTo(stream *Stream) {
- stream.WriteString(any.val)
-}
-
-func (any *stringAny) GetInterface() interface{} {
- return any.val
-}
diff --git a/vendor/github.com/json-iterator/go/any_uint32.go b/vendor/github.com/json-iterator/go/any_uint32.go
deleted file mode 100644
index 656bbd33..00000000
--- a/vendor/github.com/json-iterator/go/any_uint32.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package jsoniter
-
-import (
- "strconv"
-)
-
-type uint32Any struct {
- baseAny
- val uint32
-}
-
-func (any *uint32Any) LastError() error {
- return nil
-}
-
-func (any *uint32Any) ValueType() ValueType {
- return NumberValue
-}
-
-func (any *uint32Any) MustBeValid() Any {
- return any
-}
-
-func (any *uint32Any) ToBool() bool {
- return any.val != 0
-}
-
-func (any *uint32Any) ToInt() int {
- return int(any.val)
-}
-
-func (any *uint32Any) ToInt32() int32 {
- return int32(any.val)
-}
-
-func (any *uint32Any) ToInt64() int64 {
- return int64(any.val)
-}
-
-func (any *uint32Any) ToUint() uint {
- return uint(any.val)
-}
-
-func (any *uint32Any) ToUint32() uint32 {
- return any.val
-}
-
-func (any *uint32Any) ToUint64() uint64 {
- return uint64(any.val)
-}
-
-func (any *uint32Any) ToFloat32() float32 {
- return float32(any.val)
-}
-
-func (any *uint32Any) ToFloat64() float64 {
- return float64(any.val)
-}
-
-func (any *uint32Any) ToString() string {
- return strconv.FormatInt(int64(any.val), 10)
-}
-
-func (any *uint32Any) WriteTo(stream *Stream) {
- stream.WriteUint32(any.val)
-}
-
-func (any *uint32Any) Parse() *Iterator {
- return nil
-}
-
-func (any *uint32Any) GetInterface() interface{} {
- return any.val
-}
diff --git a/vendor/github.com/json-iterator/go/any_uint64.go b/vendor/github.com/json-iterator/go/any_uint64.go
deleted file mode 100644
index 7df2fce3..00000000
--- a/vendor/github.com/json-iterator/go/any_uint64.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package jsoniter
-
-import (
- "strconv"
-)
-
-type uint64Any struct {
- baseAny
- val uint64
-}
-
-func (any *uint64Any) LastError() error {
- return nil
-}
-
-func (any *uint64Any) ValueType() ValueType {
- return NumberValue
-}
-
-func (any *uint64Any) MustBeValid() Any {
- return any
-}
-
-func (any *uint64Any) ToBool() bool {
- return any.val != 0
-}
-
-func (any *uint64Any) ToInt() int {
- return int(any.val)
-}
-
-func (any *uint64Any) ToInt32() int32 {
- return int32(any.val)
-}
-
-func (any *uint64Any) ToInt64() int64 {
- return int64(any.val)
-}
-
-func (any *uint64Any) ToUint() uint {
- return uint(any.val)
-}
-
-func (any *uint64Any) ToUint32() uint32 {
- return uint32(any.val)
-}
-
-func (any *uint64Any) ToUint64() uint64 {
- return any.val
-}
-
-func (any *uint64Any) ToFloat32() float32 {
- return float32(any.val)
-}
-
-func (any *uint64Any) ToFloat64() float64 {
- return float64(any.val)
-}
-
-func (any *uint64Any) ToString() string {
- return strconv.FormatUint(any.val, 10)
-}
-
-func (any *uint64Any) WriteTo(stream *Stream) {
- stream.WriteUint64(any.val)
-}
-
-func (any *uint64Any) Parse() *Iterator {
- return nil
-}
-
-func (any *uint64Any) GetInterface() interface{} {
- return any.val
-}
diff --git a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh
deleted file mode 100644
index b45ef688..00000000
--- a/vendor/github.com/json-iterator/go/build.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-set -e
-set -x
-
-if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then
- mkdir -p /tmp/build-golang/src/github.com/json-iterator
- ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go
-fi
-export GOPATH=/tmp/build-golang
-go get -u github.com/golang/dep/cmd/dep
-cd /tmp/build-golang/src/github.com/json-iterator/go
-exec $GOPATH/bin/dep ensure -update
diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go
deleted file mode 100644
index 2adcdc3b..00000000
--- a/vendor/github.com/json-iterator/go/config.go
+++ /dev/null
@@ -1,375 +0,0 @@
-package jsoniter
-
-import (
- "encoding/json"
- "io"
- "reflect"
- "sync"
- "unsafe"
-
- "github.com/modern-go/concurrent"
- "github.com/modern-go/reflect2"
-)
-
-// Config customizes how the API behaves.
-// An API is created from a Config by calling Froze.
-type Config struct {
- IndentionStep int
- MarshalFloatWith6Digits bool
- EscapeHTML bool
- SortMapKeys bool
- UseNumber bool
- DisallowUnknownFields bool
- TagKey string
- OnlyTaggedField bool
- ValidateJsonRawMessage bool
- ObjectFieldMustBeSimpleString bool
- CaseSensitive bool
-}
-
-// API is the public interface of this package.
-// Its primary methods are Marshal and Unmarshal.
-type API interface {
- IteratorPool
- StreamPool
- MarshalToString(v interface{}) (string, error)
- Marshal(v interface{}) ([]byte, error)
- MarshalIndent(v interface{}, prefix, indent string) ([]byte, error)
- UnmarshalFromString(str string, v interface{}) error
- Unmarshal(data []byte, v interface{}) error
- Get(data []byte, path ...interface{}) Any
- NewEncoder(writer io.Writer) *Encoder
- NewDecoder(reader io.Reader) *Decoder
- Valid(data []byte) bool
- RegisterExtension(extension Extension)
- DecoderOf(typ reflect2.Type) ValDecoder
- EncoderOf(typ reflect2.Type) ValEncoder
-}
-
-// ConfigDefault the default API
-var ConfigDefault = Config{
- EscapeHTML: true,
-}.Froze()
-
-// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior
-var ConfigCompatibleWithStandardLibrary = Config{
- EscapeHTML: true,
- SortMapKeys: true,
- ValidateJsonRawMessage: true,
-}.Froze()
-
-// ConfigFastest marshals floats with only 6 digits of precision
-var ConfigFastest = Config{
- EscapeHTML: false,
- MarshalFloatWith6Digits: true, // will lose precision
- ObjectFieldMustBeSimpleString: true, // do not unescape object field
-}.Froze()
-
-type frozenConfig struct {
- configBeforeFrozen Config
- sortMapKeys bool
- indentionStep int
- objectFieldMustBeSimpleString bool
- onlyTaggedField bool
- disallowUnknownFields bool
- decoderCache *concurrent.Map
- encoderCache *concurrent.Map
- encoderExtension Extension
- decoderExtension Extension
- extraExtensions []Extension
- streamPool *sync.Pool
- iteratorPool *sync.Pool
- caseSensitive bool
-}
-
-func (cfg *frozenConfig) initCache() {
- cfg.decoderCache = concurrent.NewMap()
- cfg.encoderCache = concurrent.NewMap()
-}
-
-func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) {
- cfg.decoderCache.Store(cacheKey, decoder)
-}
-
-func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) {
- cfg.encoderCache.Store(cacheKey, encoder)
-}
-
-func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder {
- decoder, found := cfg.decoderCache.Load(cacheKey)
- if found {
- return decoder.(ValDecoder)
- }
- return nil
-}
-
-func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder {
- encoder, found := cfg.encoderCache.Load(cacheKey)
- if found {
- return encoder.(ValEncoder)
- }
- return nil
-}
-
-var cfgCache = concurrent.NewMap()
-
-func getFrozenConfigFromCache(cfg Config) *frozenConfig {
- obj, found := cfgCache.Load(cfg)
- if found {
- return obj.(*frozenConfig)
- }
- return nil
-}
-
-func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) {
- cfgCache.Store(cfg, frozenConfig)
-}
-
-// Froze forges an API from the config
-func (cfg Config) Froze() API {
- api := &frozenConfig{
- sortMapKeys: cfg.SortMapKeys,
- indentionStep: cfg.IndentionStep,
- objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString,
- onlyTaggedField: cfg.OnlyTaggedField,
- disallowUnknownFields: cfg.DisallowUnknownFields,
- caseSensitive: cfg.CaseSensitive,
- }
- api.streamPool = &sync.Pool{
- New: func() interface{} {
- return NewStream(api, nil, 512)
- },
- }
- api.iteratorPool = &sync.Pool{
- New: func() interface{} {
- return NewIterator(api)
- },
- }
- api.initCache()
- encoderExtension := EncoderExtension{}
- decoderExtension := DecoderExtension{}
- if cfg.MarshalFloatWith6Digits {
- api.marshalFloatWith6Digits(encoderExtension)
- }
- if cfg.EscapeHTML {
- api.escapeHTML(encoderExtension)
- }
- if cfg.UseNumber {
- api.useNumber(decoderExtension)
- }
- if cfg.ValidateJsonRawMessage {
- api.validateJsonRawMessage(encoderExtension)
- }
- api.encoderExtension = encoderExtension
- api.decoderExtension = decoderExtension
- api.configBeforeFrozen = cfg
- return api
-}
-
-func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig {
- api := getFrozenConfigFromCache(cfg)
- if api != nil {
- return api
- }
- api = cfg.Froze().(*frozenConfig)
- for _, extension := range extraExtensions {
- api.RegisterExtension(extension)
- }
- addFrozenConfigToCache(cfg, api)
- return api
-}
-
-func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) {
- encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) {
- rawMessage := *(*json.RawMessage)(ptr)
- iter := cfg.BorrowIterator([]byte(rawMessage))
- defer cfg.ReturnIterator(iter)
- iter.Read()
- if iter.Error != nil && iter.Error != io.EOF {
- stream.WriteRaw("null")
- } else {
- stream.WriteRaw(string(rawMessage))
- }
- }, func(ptr unsafe.Pointer) bool {
- return len(*((*json.RawMessage)(ptr))) == 0
- }}
- extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder
- extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder
-}
-
-func (cfg *frozenConfig) useNumber(extension DecoderExtension) {
- extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) {
- exitingValue := *((*interface{})(ptr))
- if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr {
- iter.ReadVal(exitingValue)
- return
- }
- if iter.WhatIsNext() == NumberValue {
- *((*interface{})(ptr)) = json.Number(iter.readNumberAsString())
- } else {
- *((*interface{})(ptr)) = iter.Read()
- }
- }}
-}
-func (cfg *frozenConfig) getTagKey() string {
- tagKey := cfg.configBeforeFrozen.TagKey
- if tagKey == "" {
- return "json"
- }
- return tagKey
-}
-
-func (cfg *frozenConfig) RegisterExtension(extension Extension) {
- cfg.extraExtensions = append(cfg.extraExtensions, extension)
- copied := cfg.configBeforeFrozen
- cfg.configBeforeFrozen = copied
-}
-
-type lossyFloat32Encoder struct {
-}
-
-func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteFloat32Lossy(*((*float32)(ptr)))
-}
-
-func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*float32)(ptr)) == 0
-}
-
-type lossyFloat64Encoder struct {
-}
-
-func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteFloat64Lossy(*((*float64)(ptr)))
-}
-
-func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*float64)(ptr)) == 0
-}
-
-// marshalFloatWith6Digits keeps only 10**(-6) precision
-// for float variables, for better performance.
-func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) {
- // for better performance
- extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{}
- extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{}
-}
-
-type htmlEscapedStringEncoder struct {
-}
-
-func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- str := *((*string)(ptr))
- stream.WriteStringWithHTMLEscaped(str)
-}
-
-func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*string)(ptr)) == ""
-}
-
-func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) {
- encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{}
-}
-
-func (cfg *frozenConfig) cleanDecoders() {
- typeDecoders = map[string]ValDecoder{}
- fieldDecoders = map[string]ValDecoder{}
- *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
-}
-
-func (cfg *frozenConfig) cleanEncoders() {
- typeEncoders = map[string]ValEncoder{}
- fieldEncoders = map[string]ValEncoder{}
- *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
-}
-
-func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) {
- stream := cfg.BorrowStream(nil)
- defer cfg.ReturnStream(stream)
- stream.WriteVal(v)
- if stream.Error != nil {
- return "", stream.Error
- }
- return string(stream.Buffer()), nil
-}
-
-func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) {
- stream := cfg.BorrowStream(nil)
- defer cfg.ReturnStream(stream)
- stream.WriteVal(v)
- if stream.Error != nil {
- return nil, stream.Error
- }
- result := stream.Buffer()
- copied := make([]byte, len(result))
- copy(copied, result)
- return copied, nil
-}
-
-func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
- if prefix != "" {
- panic("prefix is not supported")
- }
- for _, r := range indent {
- if r != ' ' {
- panic("indent can only be space")
- }
- }
- newCfg := cfg.configBeforeFrozen
- newCfg.IndentionStep = len(indent)
- return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v)
-}
-
-func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error {
- data := []byte(str)
- iter := cfg.BorrowIterator(data)
- defer cfg.ReturnIterator(iter)
- iter.ReadVal(v)
- c := iter.nextToken()
- if c == 0 {
- if iter.Error == io.EOF {
- return nil
- }
- return iter.Error
- }
- iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
- return iter.Error
-}
-
-func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any {
- iter := cfg.BorrowIterator(data)
- defer cfg.ReturnIterator(iter)
- return locatePath(iter, path)
-}
-
-func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error {
- iter := cfg.BorrowIterator(data)
- defer cfg.ReturnIterator(iter)
- iter.ReadVal(v)
- c := iter.nextToken()
- if c == 0 {
- if iter.Error == io.EOF {
- return nil
- }
- return iter.Error
- }
- iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
- return iter.Error
-}
-
-func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder {
- stream := NewStream(cfg, writer, 512)
- return &Encoder{stream}
-}
-
-func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder {
- iter := Parse(cfg, reader, 512)
- return &Decoder{iter}
-}
-
-func (cfg *frozenConfig) Valid(data []byte) bool {
- iter := cfg.BorrowIterator(data)
- defer cfg.ReturnIterator(iter)
- iter.Skip()
- return iter.Error == nil
-}
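Beyond the three predefined configs deleted above, any Config value can be frozen into its own API. A minimal sketch, not part of the patch (the "db" tag key and the `row` type are hypothetical):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// api reads `db:"..."` struct tags instead of `json:"..."` and keeps
// map output deterministic; freeze it once and reuse it everywhere.
var api = jsoniter.Config{
	EscapeHTML:             false,
	SortMapKeys:            true,
	ValidateJsonRawMessage: true,
	TagKey:                 "db",
}.Froze()

type row struct {
	ID   int    `db:"id"`
	Name string `db:"name"`
}

func main() {
	out, err := api.Marshal(row{ID: 7, Name: "x"})
	fmt.Println(string(out), err) // {"id":7,"name":"x"} <nil>
}
```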
diff --git a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
deleted file mode 100644
index 3095662b..00000000
--- a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
+++ /dev/null
@@ -1,7 +0,0 @@
-| json type \ dest type | bool | int | uint | float |string|
-| --- | --- | --- | --- |--|--|
-| number | positive => true <br/> negative => true <br/> zero => false| 23.2 => 23 <br/> -32.1 => -32| 12.1 => 12 <br/> -12.1 => 0|as normal|same as origin|
-| string | empty string => false <br/> string "0" => false <br/> other strings => true | "123.32" => 123 <br/> "-123.4" => -123 <br/> "123.23xxxw" => 123 <br/> "abcde12" => 0 <br/> "-32.1" => -32| 13.2 => 13 <br/> -1.1 => 0 |12.1 => 12.1 <br/> -12.3 => -12.3 <br/> 12.4xxa => 12.4 <br/> +1.1e2 =>110 |same as origin|
-| bool | true => true <br/> false => false| true => 1 <br/> false => 0 | true => 1 <br/> false => 0 |true => 1 <br/> false => 0|true => "true" <br/> false => "false"|
-| object | true | 0 | 0 |0|original json|
-| array | empty array => false <br/> nonempty array => true| [] => 0 <br/> [1,2] => 1 | [] => 0 <br/> [1,2] => 1 |[] => 0 <br/> [1,2] => 1|original json|
\ No newline at end of file
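The table above describes the lenient conversions performed by the Any accessors. A short sketch of the string row, not part of the patch (the example values follow the table):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	data := []byte(`{"count":"123.23xxxw","flag":"0","ratio":"-12.3"}`)

	fmt.Println(jsoniter.Get(data, "count").ToInt())     // 123: digits are parsed, trailing garbage ignored
	fmt.Println(jsoniter.Get(data, "flag").ToBool())     // false: the string "0" is falsy
	fmt.Println(jsoniter.Get(data, "ratio").ToFloat64()) // -12.3
}
```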
diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go
deleted file mode 100644
index 29b31cf7..00000000
--- a/vendor/github.com/json-iterator/go/iter.go
+++ /dev/null
@@ -1,349 +0,0 @@
-package jsoniter
-
-import (
- "encoding/json"
- "fmt"
- "io"
-)
-
-// ValueType the type for JSON element
-type ValueType int
-
-const (
- // InvalidValue invalid JSON element
- InvalidValue ValueType = iota
- // StringValue JSON element "string"
- StringValue
- // NumberValue JSON element 100 or 0.10
- NumberValue
- // NilValue JSON element null
- NilValue
- // BoolValue JSON element true or false
- BoolValue
- // ArrayValue JSON element []
- ArrayValue
- // ObjectValue JSON element {}
- ObjectValue
-)
-
-var hexDigits []byte
-var valueTypes []ValueType
-
-func init() {
- hexDigits = make([]byte, 256)
- for i := 0; i < len(hexDigits); i++ {
- hexDigits[i] = 255
- }
- for i := '0'; i <= '9'; i++ {
- hexDigits[i] = byte(i - '0')
- }
- for i := 'a'; i <= 'f'; i++ {
- hexDigits[i] = byte((i - 'a') + 10)
- }
- for i := 'A'; i <= 'F'; i++ {
- hexDigits[i] = byte((i - 'A') + 10)
- }
- valueTypes = make([]ValueType, 256)
- for i := 0; i < len(valueTypes); i++ {
- valueTypes[i] = InvalidValue
- }
- valueTypes['"'] = StringValue
- valueTypes['-'] = NumberValue
- valueTypes['0'] = NumberValue
- valueTypes['1'] = NumberValue
- valueTypes['2'] = NumberValue
- valueTypes['3'] = NumberValue
- valueTypes['4'] = NumberValue
- valueTypes['5'] = NumberValue
- valueTypes['6'] = NumberValue
- valueTypes['7'] = NumberValue
- valueTypes['8'] = NumberValue
- valueTypes['9'] = NumberValue
- valueTypes['t'] = BoolValue
- valueTypes['f'] = BoolValue
- valueTypes['n'] = NilValue
- valueTypes['['] = ArrayValue
- valueTypes['{'] = ObjectValue
-}
-
-// Iterator is an io.Reader-like object with JSON-specific read functions.
-// Errors are not returned as return values, but stored in the Error field of this iterator instance.
-type Iterator struct {
- cfg *frozenConfig
- reader io.Reader
- buf []byte
- head int
- tail int
- depth int
- captureStartedAt int
- captured []byte
- Error error
- Attachment interface{} // open for customized decoder
-}
-
-// NewIterator creates an empty Iterator instance
-func NewIterator(cfg API) *Iterator {
- return &Iterator{
- cfg: cfg.(*frozenConfig),
- reader: nil,
- buf: nil,
- head: 0,
- tail: 0,
- depth: 0,
- }
-}
-
-// Parse creates an Iterator instance from io.Reader
-func Parse(cfg API, reader io.Reader, bufSize int) *Iterator {
- return &Iterator{
- cfg: cfg.(*frozenConfig),
- reader: reader,
- buf: make([]byte, bufSize),
- head: 0,
- tail: 0,
- depth: 0,
- }
-}
-
-// ParseBytes creates an Iterator instance from byte array
-func ParseBytes(cfg API, input []byte) *Iterator {
- return &Iterator{
- cfg: cfg.(*frozenConfig),
- reader: nil,
- buf: input,
- head: 0,
- tail: len(input),
- depth: 0,
- }
-}
-
-// ParseString creates an Iterator instance from string
-func ParseString(cfg API, input string) *Iterator {
- return ParseBytes(cfg, []byte(input))
-}
-
-// Pool returns a pool can provide more iterator with same configuration
-func (iter *Iterator) Pool() IteratorPool {
- return iter.cfg
-}
-
-// Reset reuse iterator instance by specifying another reader
-func (iter *Iterator) Reset(reader io.Reader) *Iterator {
- iter.reader = reader
- iter.head = 0
- iter.tail = 0
- iter.depth = 0
- return iter
-}
-
-// ResetBytes reuse iterator instance by specifying another byte array as input
-func (iter *Iterator) ResetBytes(input []byte) *Iterator {
- iter.reader = nil
- iter.buf = input
- iter.head = 0
- iter.tail = len(input)
- iter.depth = 0
- return iter
-}
-
-// WhatIsNext gets ValueType of relatively next json element
-func (iter *Iterator) WhatIsNext() ValueType {
- valueType := valueTypes[iter.nextToken()]
- iter.unreadByte()
- return valueType
-}
-
-func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool {
- for i := iter.head; i < iter.tail; i++ {
- c := iter.buf[i]
- switch c {
- case ' ', '\n', '\t', '\r':
- continue
- }
- iter.head = i
- return false
- }
- return true
-}
-
-func (iter *Iterator) isObjectEnd() bool {
- c := iter.nextToken()
- if c == ',' {
- return false
- }
- if c == '}' {
- return true
- }
- iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c}))
- return true
-}
-
-func (iter *Iterator) nextToken() byte {
- // a variation of skip whitespaces, returning the next non-whitespace token
- for {
- for i := iter.head; i < iter.tail; i++ {
- c := iter.buf[i]
- switch c {
- case ' ', '\n', '\t', '\r':
- continue
- }
- iter.head = i + 1
- return c
- }
- if !iter.loadMore() {
- return 0
- }
- }
-}
-
-// ReportError record a error in iterator instance with current position.
-func (iter *Iterator) ReportError(operation string, msg string) {
- if iter.Error != nil {
- if iter.Error != io.EOF {
- return
- }
- }
- peekStart := iter.head - 10
- if peekStart < 0 {
- peekStart = 0
- }
- peekEnd := iter.head + 10
- if peekEnd > iter.tail {
- peekEnd = iter.tail
- }
- parsing := string(iter.buf[peekStart:peekEnd])
- contextStart := iter.head - 50
- if contextStart < 0 {
- contextStart = 0
- }
- contextEnd := iter.head + 50
- if contextEnd > iter.tail {
- contextEnd = iter.tail
- }
- context := string(iter.buf[contextStart:contextEnd])
- iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...",
- operation, msg, iter.head-peekStart, parsing, context)
-}
-
-// CurrentBuffer gets current buffer as string for debugging purpose
-func (iter *Iterator) CurrentBuffer() string {
- peekStart := iter.head - 10
- if peekStart < 0 {
- peekStart = 0
- }
- return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head,
- string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail]))
-}
-
-func (iter *Iterator) readByte() (ret byte) {
- if iter.head == iter.tail {
- if iter.loadMore() {
- ret = iter.buf[iter.head]
- iter.head++
- return ret
- }
- return 0
- }
- ret = iter.buf[iter.head]
- iter.head++
- return ret
-}
-
-func (iter *Iterator) loadMore() bool {
- if iter.reader == nil {
- if iter.Error == nil {
- iter.head = iter.tail
- iter.Error = io.EOF
- }
- return false
- }
- if iter.captured != nil {
- iter.captured = append(iter.captured,
- iter.buf[iter.captureStartedAt:iter.tail]...)
- iter.captureStartedAt = 0
- }
- for {
- n, err := iter.reader.Read(iter.buf)
- if n == 0 {
- if err != nil {
- if iter.Error == nil {
- iter.Error = err
- }
- return false
- }
- } else {
- iter.head = 0
- iter.tail = n
- return true
- }
- }
-}
-
-func (iter *Iterator) unreadByte() {
- if iter.Error != nil {
- return
- }
- iter.head--
- return
-}
-
-// Read read the next JSON element as generic interface{}.
-func (iter *Iterator) Read() interface{} {
- valueType := iter.WhatIsNext()
- switch valueType {
- case StringValue:
- return iter.ReadString()
- case NumberValue:
- if iter.cfg.configBeforeFrozen.UseNumber {
- return json.Number(iter.readNumberAsString())
- }
- return iter.ReadFloat64()
- case NilValue:
- iter.skipFourBytes('n', 'u', 'l', 'l')
- return nil
- case BoolValue:
- return iter.ReadBool()
- case ArrayValue:
- arr := []interface{}{}
- iter.ReadArrayCB(func(iter *Iterator) bool {
- var elem interface{}
- iter.ReadVal(&elem)
- arr = append(arr, elem)
- return true
- })
- return arr
- case ObjectValue:
- obj := map[string]interface{}{}
- iter.ReadMapCB(func(Iter *Iterator, field string) bool {
- var elem interface{}
- iter.ReadVal(&elem)
- obj[field] = elem
- return true
- })
- return obj
- default:
- iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType))
- return nil
- }
-}
-
-// limit maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9
-const maxDepth = 10000
-
-func (iter *Iterator) incrementDepth() (success bool) {
- iter.depth++
- if iter.depth <= maxDepth {
- return true
- }
- iter.ReportError("incrementDepth", "exceeded max depth")
- return false
-}
-
-func (iter *Iterator) decrementDepth() (success bool) {
- iter.depth--
- if iter.depth >= 0 {
- return true
- }
- iter.ReportError("decrementDepth", "unexpected negative nesting")
- return false
-}
diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go
deleted file mode 100644
index 204fe0e0..00000000
--- a/vendor/github.com/json-iterator/go/iter_array.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package jsoniter
-
-// ReadArray read array element, tells if the array has more element to read.
-func (iter *Iterator) ReadArray() (ret bool) {
- c := iter.nextToken()
- switch c {
- case 'n':
- iter.skipThreeBytes('u', 'l', 'l')
- return false // null
- case '[':
- c = iter.nextToken()
- if c != ']' {
- iter.unreadByte()
- return true
- }
- return false
- case ']':
- return false
- case ',':
- return true
- default:
- iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c}))
- return
- }
-}
-
-// ReadArrayCB read array with callback
-func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) {
- c := iter.nextToken()
- if c == '[' {
- if !iter.incrementDepth() {
- return false
- }
- c = iter.nextToken()
- if c != ']' {
- iter.unreadByte()
- if !callback(iter) {
- iter.decrementDepth()
- return false
- }
- c = iter.nextToken()
- for c == ',' {
- if !callback(iter) {
- iter.decrementDepth()
- return false
- }
- c = iter.nextToken()
- }
- if c != ']' {
- iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c}))
- iter.decrementDepth()
- return false
- }
- return iter.decrementDepth()
- }
- return iter.decrementDepth()
- }
- if c == 'n' {
- iter.skipThreeBytes('u', 'l', 'l')
- return true // null
- }
- iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c}))
- return false
-}
diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go
deleted file mode 100644
index 8a3d8b6f..00000000
--- a/vendor/github.com/json-iterator/go/iter_float.go
+++ /dev/null
@@ -1,342 +0,0 @@
-package jsoniter
-
-import (
- "encoding/json"
- "io"
- "math/big"
- "strconv"
- "strings"
- "unsafe"
-)
-
-var floatDigits []int8
-
-const invalidCharForNumber = int8(-1)
-const endOfNumber = int8(-2)
-const dotInNumber = int8(-3)
-
-func init() {
- floatDigits = make([]int8, 256)
- for i := 0; i < len(floatDigits); i++ {
- floatDigits[i] = invalidCharForNumber
- }
- for i := int8('0'); i <= int8('9'); i++ {
- floatDigits[i] = i - int8('0')
- }
- floatDigits[','] = endOfNumber
- floatDigits[']'] = endOfNumber
- floatDigits['}'] = endOfNumber
- floatDigits[' '] = endOfNumber
- floatDigits['\t'] = endOfNumber
- floatDigits['\n'] = endOfNumber
- floatDigits['.'] = dotInNumber
-}
-
-// ReadBigFloat read big.Float
-func (iter *Iterator) ReadBigFloat() (ret *big.Float) {
- str := iter.readNumberAsString()
- if iter.Error != nil && iter.Error != io.EOF {
- return nil
- }
- prec := 64
- if len(str) > prec {
- prec = len(str)
- }
- val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero)
- if err != nil {
- iter.Error = err
- return nil
- }
- return val
-}
-
-// ReadBigInt read big.Int
-func (iter *Iterator) ReadBigInt() (ret *big.Int) {
- str := iter.readNumberAsString()
- if iter.Error != nil && iter.Error != io.EOF {
- return nil
- }
- ret = big.NewInt(0)
- var success bool
- ret, success = ret.SetString(str, 10)
- if !success {
- iter.ReportError("ReadBigInt", "invalid big int")
- return nil
- }
- return ret
-}
-
-//ReadFloat32 read float32
-func (iter *Iterator) ReadFloat32() (ret float32) {
- c := iter.nextToken()
- if c == '-' {
- return -iter.readPositiveFloat32()
- }
- iter.unreadByte()
- return iter.readPositiveFloat32()
-}
-
-func (iter *Iterator) readPositiveFloat32() (ret float32) {
- i := iter.head
- // first char
- if i == iter.tail {
- return iter.readFloat32SlowPath()
- }
- c := iter.buf[i]
- i++
- ind := floatDigits[c]
- switch ind {
- case invalidCharForNumber:
- return iter.readFloat32SlowPath()
- case endOfNumber:
- iter.ReportError("readFloat32", "empty number")
- return
- case dotInNumber:
- iter.ReportError("readFloat32", "leading dot is invalid")
- return
- case 0:
- if i == iter.tail {
- return iter.readFloat32SlowPath()
- }
- c = iter.buf[i]
- switch c {
- case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
- iter.ReportError("readFloat32", "leading zero is invalid")
- return
- }
- }
- value := uint64(ind)
- // chars before dot
-non_decimal_loop:
- for ; i < iter.tail; i++ {
- c = iter.buf[i]
- ind := floatDigits[c]
- switch ind {
- case invalidCharForNumber:
- return iter.readFloat32SlowPath()
- case endOfNumber:
- iter.head = i
- return float32(value)
- case dotInNumber:
- break non_decimal_loop
- }
- if value > uint64SafeToMultiple10 {
- return iter.readFloat32SlowPath()
- }
- value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
- }
- // chars after dot
- if c == '.' {
- i++
- decimalPlaces := 0
- if i == iter.tail {
- return iter.readFloat32SlowPath()
- }
- for ; i < iter.tail; i++ {
- c = iter.buf[i]
- ind := floatDigits[c]
- switch ind {
- case endOfNumber:
- if decimalPlaces > 0 && decimalPlaces < len(pow10) {
- iter.head = i
- return float32(float64(value) / float64(pow10[decimalPlaces]))
- }
- // too many decimal places
- return iter.readFloat32SlowPath()
- case invalidCharForNumber, dotInNumber:
- return iter.readFloat32SlowPath()
- }
- decimalPlaces++
- if value > uint64SafeToMultiple10 {
- return iter.readFloat32SlowPath()
- }
- value = (value << 3) + (value << 1) + uint64(ind)
- }
- }
- return iter.readFloat32SlowPath()
-}
-
-func (iter *Iterator) readNumberAsString() (ret string) {
- strBuf := [16]byte{}
- str := strBuf[0:0]
-load_loop:
- for {
- for i := iter.head; i < iter.tail; i++ {
- c := iter.buf[i]
- switch c {
- case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
- str = append(str, c)
- continue
- default:
- iter.head = i
- break load_loop
- }
- }
- if !iter.loadMore() {
- break
- }
- }
- if iter.Error != nil && iter.Error != io.EOF {
- return
- }
- if len(str) == 0 {
- iter.ReportError("readNumberAsString", "invalid number")
- }
- return *(*string)(unsafe.Pointer(&str))
-}
-
-func (iter *Iterator) readFloat32SlowPath() (ret float32) {
- str := iter.readNumberAsString()
- if iter.Error != nil && iter.Error != io.EOF {
- return
- }
- errMsg := validateFloat(str)
- if errMsg != "" {
- iter.ReportError("readFloat32SlowPath", errMsg)
- return
- }
- val, err := strconv.ParseFloat(str, 32)
- if err != nil {
- iter.Error = err
- return
- }
- return float32(val)
-}
-
-// ReadFloat64 read float64
-func (iter *Iterator) ReadFloat64() (ret float64) {
- c := iter.nextToken()
- if c == '-' {
- return -iter.readPositiveFloat64()
- }
- iter.unreadByte()
- return iter.readPositiveFloat64()
-}
-
-func (iter *Iterator) readPositiveFloat64() (ret float64) {
- i := iter.head
- // first char
- if i == iter.tail {
- return iter.readFloat64SlowPath()
- }
- c := iter.buf[i]
- i++
- ind := floatDigits[c]
- switch ind {
- case invalidCharForNumber:
- return iter.readFloat64SlowPath()
- case endOfNumber:
- iter.ReportError("readFloat64", "empty number")
- return
- case dotInNumber:
- iter.ReportError("readFloat64", "leading dot is invalid")
- return
- case 0:
- if i == iter.tail {
- return iter.readFloat64SlowPath()
- }
- c = iter.buf[i]
- switch c {
- case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
- iter.ReportError("readFloat64", "leading zero is invalid")
- return
- }
- }
- value := uint64(ind)
- // chars before dot
-non_decimal_loop:
- for ; i < iter.tail; i++ {
- c = iter.buf[i]
- ind := floatDigits[c]
- switch ind {
- case invalidCharForNumber:
- return iter.readFloat64SlowPath()
- case endOfNumber:
- iter.head = i
- return float64(value)
- case dotInNumber:
- break non_decimal_loop
- }
- if value > uint64SafeToMultiple10 {
- return iter.readFloat64SlowPath()
- }
- value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
- }
- // chars after dot
- if c == '.' {
- i++
- decimalPlaces := 0
- if i == iter.tail {
- return iter.readFloat64SlowPath()
- }
- for ; i < iter.tail; i++ {
- c = iter.buf[i]
- ind := floatDigits[c]
- switch ind {
- case endOfNumber:
- if decimalPlaces > 0 && decimalPlaces < len(pow10) {
- iter.head = i
- return float64(value) / float64(pow10[decimalPlaces])
- }
- // too many decimal places
- return iter.readFloat64SlowPath()
- case invalidCharForNumber, dotInNumber:
- return iter.readFloat64SlowPath()
- }
- decimalPlaces++
- if value > uint64SafeToMultiple10 {
- return iter.readFloat64SlowPath()
- }
- value = (value << 3) + (value << 1) + uint64(ind)
- if value > maxFloat64 {
- return iter.readFloat64SlowPath()
- }
- }
- }
- return iter.readFloat64SlowPath()
-}
-
-func (iter *Iterator) readFloat64SlowPath() (ret float64) {
- str := iter.readNumberAsString()
- if iter.Error != nil && iter.Error != io.EOF {
- return
- }
- errMsg := validateFloat(str)
- if errMsg != "" {
- iter.ReportError("readFloat64SlowPath", errMsg)
- return
- }
- val, err := strconv.ParseFloat(str, 64)
- if err != nil {
- iter.Error = err
- return
- }
- return val
-}
-
-func validateFloat(str string) string {
- // strconv.ParseFloat is not validating `1.` or `1.e1`
- if len(str) == 0 {
- return "empty number"
- }
- if str[0] == '-' {
- return "-- is not valid"
- }
- dotPos := strings.IndexByte(str, '.')
- if dotPos != -1 {
- if dotPos == len(str)-1 {
- return "dot can not be last character"
- }
- switch str[dotPos+1] {
- case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
- default:
- return "missing digit after dot"
- }
- }
- return ""
-}
-
-// ReadNumber read json.Number
-func (iter *Iterator) ReadNumber() (ret json.Number) {
- return json.Number(iter.readNumberAsString())
-}
diff --git a/vendor/github.com/json-iterator/go/iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go
deleted file mode 100644
index d786a89f..00000000
--- a/vendor/github.com/json-iterator/go/iter_int.go
+++ /dev/null
@@ -1,346 +0,0 @@
-package jsoniter
-
-import (
- "math"
- "strconv"
-)
-
-var intDigits []int8
-
-const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1
-const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1
-const maxFloat64 = 1<<53 - 1
-
-func init() {
- intDigits = make([]int8, 256)
- for i := 0; i < len(intDigits); i++ {
- intDigits[i] = invalidCharForNumber
- }
- for i := int8('0'); i <= int8('9'); i++ {
- intDigits[i] = i - int8('0')
- }
-}
-
-// ReadUint read uint
-func (iter *Iterator) ReadUint() uint {
- if strconv.IntSize == 32 {
- return uint(iter.ReadUint32())
- }
- return uint(iter.ReadUint64())
-}
-
-// ReadInt read int
-func (iter *Iterator) ReadInt() int {
- if strconv.IntSize == 32 {
- return int(iter.ReadInt32())
- }
- return int(iter.ReadInt64())
-}
-
-// ReadInt8 read int8
-func (iter *Iterator) ReadInt8() (ret int8) {
- c := iter.nextToken()
- if c == '-' {
- val := iter.readUint32(iter.readByte())
- if val > math.MaxInt8+1 {
- iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
- return
- }
- return -int8(val)
- }
- val := iter.readUint32(c)
- if val > math.MaxInt8 {
- iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
- return
- }
- return int8(val)
-}
-
-// ReadUint8 read uint8
-func (iter *Iterator) ReadUint8() (ret uint8) {
- val := iter.readUint32(iter.nextToken())
- if val > math.MaxUint8 {
- iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10))
- return
- }
- return uint8(val)
-}
-
-// ReadInt16 read int16
-func (iter *Iterator) ReadInt16() (ret int16) {
- c := iter.nextToken()
- if c == '-' {
- val := iter.readUint32(iter.readByte())
- if val > math.MaxInt16+1 {
- iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
- return
- }
- return -int16(val)
- }
- val := iter.readUint32(c)
- if val > math.MaxInt16 {
- iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
- return
- }
- return int16(val)
-}
-
-// ReadUint16 read uint16
-func (iter *Iterator) ReadUint16() (ret uint16) {
- val := iter.readUint32(iter.nextToken())
- if val > math.MaxUint16 {
- iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10))
- return
- }
- return uint16(val)
-}
-
-// ReadInt32 read int32
-func (iter *Iterator) ReadInt32() (ret int32) {
- c := iter.nextToken()
- if c == '-' {
- val := iter.readUint32(iter.readByte())
- if val > math.MaxInt32+1 {
- iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
- return
- }
- return -int32(val)
- }
- val := iter.readUint32(c)
- if val > math.MaxInt32 {
- iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
- return
- }
- return int32(val)
-}
-
-// ReadUint32 read uint32
-func (iter *Iterator) ReadUint32() (ret uint32) {
- return iter.readUint32(iter.nextToken())
-}
-
-func (iter *Iterator) readUint32(c byte) (ret uint32) {
- ind := intDigits[c]
- if ind == 0 {
- iter.assertInteger()
- return 0 // single zero
- }
- if ind == invalidCharForNumber {
- iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)}))
- return
- }
- value := uint32(ind)
- if iter.tail-iter.head > 10 {
- i := iter.head
- ind2 := intDigits[iter.buf[i]]
- if ind2 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value
- }
- i++
- ind3 := intDigits[iter.buf[i]]
- if ind3 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value*10 + uint32(ind2)
- }
- //iter.head = i + 1
- //value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
- i++
- ind4 := intDigits[iter.buf[i]]
- if ind4 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value*100 + uint32(ind2)*10 + uint32(ind3)
- }
- i++
- ind5 := intDigits[iter.buf[i]]
- if ind5 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4)
- }
- i++
- ind6 := intDigits[iter.buf[i]]
- if ind6 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5)
- }
- i++
- ind7 := intDigits[iter.buf[i]]
- if ind7 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6)
- }
- i++
- ind8 := intDigits[iter.buf[i]]
- if ind8 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7)
- }
- i++
- ind9 := intDigits[iter.buf[i]]
- value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8)
- iter.head = i
- if ind9 == invalidCharForNumber {
- iter.assertInteger()
- return value
- }
- }
- for {
- for i := iter.head; i < iter.tail; i++ {
- ind = intDigits[iter.buf[i]]
- if ind == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value
- }
- if value > uint32SafeToMultiply10 {
- value2 := (value << 3) + (value << 1) + uint32(ind)
- if value2 < value {
- iter.ReportError("readUint32", "overflow")
- return
- }
- value = value2
- continue
- }
- value = (value << 3) + (value << 1) + uint32(ind)
- }
- if !iter.loadMore() {
- iter.assertInteger()
- return value
- }
- }
-}
-
-// ReadInt64 read int64
-func (iter *Iterator) ReadInt64() (ret int64) {
- c := iter.nextToken()
- if c == '-' {
- val := iter.readUint64(iter.readByte())
- if val > math.MaxInt64+1 {
- iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
- return
- }
- return -int64(val)
- }
- val := iter.readUint64(c)
- if val > math.MaxInt64 {
- iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
- return
- }
- return int64(val)
-}
-
-// ReadUint64 read uint64
-func (iter *Iterator) ReadUint64() uint64 {
- return iter.readUint64(iter.nextToken())
-}
-
-func (iter *Iterator) readUint64(c byte) (ret uint64) {
- ind := intDigits[c]
- if ind == 0 {
- iter.assertInteger()
- return 0 // single zero
- }
- if ind == invalidCharForNumber {
- iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)}))
- return
- }
- value := uint64(ind)
- if iter.tail-iter.head > 10 {
- i := iter.head
- ind2 := intDigits[iter.buf[i]]
- if ind2 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value
- }
- i++
- ind3 := intDigits[iter.buf[i]]
- if ind3 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value*10 + uint64(ind2)
- }
- //iter.head = i + 1
- //value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
- i++
- ind4 := intDigits[iter.buf[i]]
- if ind4 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value*100 + uint64(ind2)*10 + uint64(ind3)
- }
- i++
- ind5 := intDigits[iter.buf[i]]
- if ind5 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4)
- }
- i++
- ind6 := intDigits[iter.buf[i]]
- if ind6 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5)
- }
- i++
- ind7 := intDigits[iter.buf[i]]
- if ind7 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6)
- }
- i++
- ind8 := intDigits[iter.buf[i]]
- if ind8 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7)
- }
- i++
- ind9 := intDigits[iter.buf[i]]
- value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8)
- iter.head = i
- if ind9 == invalidCharForNumber {
- iter.assertInteger()
- return value
- }
- }
- for {
- for i := iter.head; i < iter.tail; i++ {
- ind = intDigits[iter.buf[i]]
- if ind == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value
- }
- if value > uint64SafeToMultiple10 {
- value2 := (value << 3) + (value << 1) + uint64(ind)
- if value2 < value {
- iter.ReportError("readUint64", "overflow")
- return
- }
- value = value2
- continue
- }
- value = (value << 3) + (value << 1) + uint64(ind)
- }
- if !iter.loadMore() {
- iter.assertInteger()
- return value
- }
- }
-}
-
-func (iter *Iterator) assertInteger() {
- if iter.head < iter.tail && iter.buf[iter.head] == '.' {
- iter.ReportError("assertInteger", "can not decode float as int")
- }
-}
diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go
deleted file mode 100644
index 58ee89c8..00000000
--- a/vendor/github.com/json-iterator/go/iter_object.go
+++ /dev/null
@@ -1,267 +0,0 @@
-package jsoniter
-
-import (
- "fmt"
- "strings"
-)
-
-// ReadObject read one field from object.
-// If object ended, returns empty string.
-// Otherwise, returns the field name.
-func (iter *Iterator) ReadObject() (ret string) {
- c := iter.nextToken()
- switch c {
- case 'n':
- iter.skipThreeBytes('u', 'l', 'l')
- return "" // null
- case '{':
- c = iter.nextToken()
- if c == '"' {
- iter.unreadByte()
- field := iter.ReadString()
- c = iter.nextToken()
- if c != ':' {
- iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
- }
- return field
- }
- if c == '}' {
- return "" // end of object
- }
- iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c}))
- return
- case ',':
- field := iter.ReadString()
- c = iter.nextToken()
- if c != ':' {
- iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
- }
- return field
- case '}':
- return "" // end of object
- default:
- iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c})))
- return
- }
-}
-
-// CaseInsensitive
-func (iter *Iterator) readFieldHash() int64 {
- hash := int64(0x811c9dc5)
- c := iter.nextToken()
- if c != '"' {
- iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c}))
- return 0
- }
- for {
- for i := iter.head; i < iter.tail; i++ {
- // require ascii string and no escape
- b := iter.buf[i]
- if b == '\\' {
- iter.head = i
- for _, b := range iter.readStringSlowPath() {
- if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive {
- b += 'a' - 'A'
- }
- hash ^= int64(b)
- hash *= 0x1000193
- }
- c = iter.nextToken()
- if c != ':' {
- iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
- return 0
- }
- return hash
- }
- if b == '"' {
- iter.head = i + 1
- c = iter.nextToken()
- if c != ':' {
- iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
- return 0
- }
- return hash
- }
- if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive {
- b += 'a' - 'A'
- }
- hash ^= int64(b)
- hash *= 0x1000193
- }
- if !iter.loadMore() {
- iter.ReportError("readFieldHash", `incomplete field name`)
- return 0
- }
- }
-}
-
-func calcHash(str string, caseSensitive bool) int64 {
- if !caseSensitive {
- str = strings.ToLower(str)
- }
- hash := int64(0x811c9dc5)
- for _, b := range []byte(str) {
- hash ^= int64(b)
- hash *= 0x1000193
- }
- return int64(hash)
-}
-
-// ReadObjectCB read object with callback, the key is ascii only and field name not copied
-func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
- c := iter.nextToken()
- var field string
- if c == '{' {
- if !iter.incrementDepth() {
- return false
- }
- c = iter.nextToken()
- if c == '"' {
- iter.unreadByte()
- field = iter.ReadString()
- c = iter.nextToken()
- if c != ':' {
- iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
- }
- if !callback(iter, field) {
- iter.decrementDepth()
- return false
- }
- c = iter.nextToken()
- for c == ',' {
- field = iter.ReadString()
- c = iter.nextToken()
- if c != ':' {
- iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
- }
- if !callback(iter, field) {
- iter.decrementDepth()
- return false
- }
- c = iter.nextToken()
- }
- if c != '}' {
- iter.ReportError("ReadObjectCB", `object not ended with }`)
- iter.decrementDepth()
- return false
- }
- return iter.decrementDepth()
- }
- if c == '}' {
- return iter.decrementDepth()
- }
- iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c}))
- iter.decrementDepth()
- return false
- }
- if c == 'n' {
- iter.skipThreeBytes('u', 'l', 'l')
- return true // null
- }
- iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c}))
- return false
-}
-
-// ReadMapCB read map with callback, the key can be any string
-func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
- c := iter.nextToken()
- if c == '{' {
- if !iter.incrementDepth() {
- return false
- }
- c = iter.nextToken()
- if c == '"' {
- iter.unreadByte()
- field := iter.ReadString()
- if iter.nextToken() != ':' {
- iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
- iter.decrementDepth()
- return false
- }
- if !callback(iter, field) {
- iter.decrementDepth()
- return false
- }
- c = iter.nextToken()
- for c == ',' {
- field = iter.ReadString()
- if iter.nextToken() != ':' {
- iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
- iter.decrementDepth()
- return false
- }
- if !callback(iter, field) {
- iter.decrementDepth()
- return false
- }
- c = iter.nextToken()
- }
- if c != '}' {
- iter.ReportError("ReadMapCB", `object not ended with }`)
- iter.decrementDepth()
- return false
- }
- return iter.decrementDepth()
- }
- if c == '}' {
- return iter.decrementDepth()
- }
- iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c}))
- iter.decrementDepth()
- return false
- }
- if c == 'n' {
- iter.skipThreeBytes('u', 'l', 'l')
- return true // null
- }
- iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c}))
- return false
-}
-
-func (iter *Iterator) readObjectStart() bool {
- c := iter.nextToken()
- if c == '{' {
- c = iter.nextToken()
- if c == '}' {
- return false
- }
- iter.unreadByte()
- return true
- } else if c == 'n' {
- iter.skipThreeBytes('u', 'l', 'l')
- return false
- }
- iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c}))
- return false
-}
-
-func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) {
- str := iter.ReadStringAsSlice()
- if iter.skipWhitespacesWithoutLoadMore() {
- if ret == nil {
- ret = make([]byte, len(str))
- copy(ret, str)
- }
- if !iter.loadMore() {
- return
- }
- }
- if iter.buf[iter.head] != ':' {
- iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]}))
- return
- }
- iter.head++
- if iter.skipWhitespacesWithoutLoadMore() {
- if ret == nil {
- ret = make([]byte, len(str))
- copy(ret, str)
- }
- if !iter.loadMore() {
- return
- }
- }
- if ret == nil {
- return str
- }
- return ret
-}
diff --git a/vendor/github.com/json-iterator/go/iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go
deleted file mode 100644
index e91eefb1..00000000
--- a/vendor/github.com/json-iterator/go/iter_skip.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package jsoniter
-
-import "fmt"
-
-// ReadNil reads a json object as nil and
-// returns whether it's a nil or not
-func (iter *Iterator) ReadNil() (ret bool) {
- c := iter.nextToken()
- if c == 'n' {
- iter.skipThreeBytes('u', 'l', 'l') // null
- return true
- }
- iter.unreadByte()
- return false
-}
-
-// ReadBool reads a json object as BoolValue
-func (iter *Iterator) ReadBool() (ret bool) {
- c := iter.nextToken()
- if c == 't' {
- iter.skipThreeBytes('r', 'u', 'e')
- return true
- }
- if c == 'f' {
- iter.skipFourBytes('a', 'l', 's', 'e')
- return false
- }
- iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c}))
- return
-}
-
-// SkipAndReturnBytes skip next JSON element, and return its content as []byte.
-// The []byte can be kept, it is a copy of data.
-func (iter *Iterator) SkipAndReturnBytes() []byte {
- iter.startCapture(iter.head)
- iter.Skip()
- return iter.stopCapture()
-}
-
-// SkipAndAppendBytes skips next JSON element and appends its content to
-// buffer, returning the result.
-func (iter *Iterator) SkipAndAppendBytes(buf []byte) []byte {
- iter.startCaptureTo(buf, iter.head)
- iter.Skip()
- return iter.stopCapture()
-}
-
-func (iter *Iterator) startCaptureTo(buf []byte, captureStartedAt int) {
- if iter.captured != nil {
- panic("already in capture mode")
- }
- iter.captureStartedAt = captureStartedAt
- iter.captured = buf
-}
-
-func (iter *Iterator) startCapture(captureStartedAt int) {
- iter.startCaptureTo(make([]byte, 0, 32), captureStartedAt)
-}
-
-func (iter *Iterator) stopCapture() []byte {
- if iter.captured == nil {
- panic("not in capture mode")
- }
- captured := iter.captured
- remaining := iter.buf[iter.captureStartedAt:iter.head]
- iter.captureStartedAt = -1
- iter.captured = nil
- return append(captured, remaining...)
-}
-
-// Skip skips a json object and positions to relatively the next json object
-func (iter *Iterator) Skip() {
- c := iter.nextToken()
- switch c {
- case '"':
- iter.skipString()
- case 'n':
- iter.skipThreeBytes('u', 'l', 'l') // null
- case 't':
- iter.skipThreeBytes('r', 'u', 'e') // true
- case 'f':
- iter.skipFourBytes('a', 'l', 's', 'e') // false
- case '0':
- iter.unreadByte()
- iter.ReadFloat32()
- case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9':
- iter.skipNumber()
- case '[':
- iter.skipArray()
- case '{':
- iter.skipObject()
- default:
- iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c))
- return
- }
-}
-
-func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) {
- if iter.readByte() != b1 {
- iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
- return
- }
- if iter.readByte() != b2 {
- iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
- return
- }
- if iter.readByte() != b3 {
- iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
- return
- }
- if iter.readByte() != b4 {
- iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
- return
- }
-}
-
-func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) {
- if iter.readByte() != b1 {
- iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
- return
- }
- if iter.readByte() != b2 {
- iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
- return
- }
- if iter.readByte() != b3 {
- iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
- return
- }
-}
diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
deleted file mode 100644
index 9303de41..00000000
--- a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
+++ /dev/null
@@ -1,163 +0,0 @@
-//+build jsoniter_sloppy
-
-package jsoniter
-
-// sloppy but faster implementation, do not validate the input json
-
-func (iter *Iterator) skipNumber() {
- for {
- for i := iter.head; i < iter.tail; i++ {
- c := iter.buf[i]
- switch c {
- case ' ', '\n', '\r', '\t', ',', '}', ']':
- iter.head = i
- return
- }
- }
- if !iter.loadMore() {
- return
- }
- }
-}
-
-func (iter *Iterator) skipArray() {
- level := 1
- if !iter.incrementDepth() {
- return
- }
- for {
- for i := iter.head; i < iter.tail; i++ {
- switch iter.buf[i] {
- case '"': // If inside string, skip it
- iter.head = i + 1
- iter.skipString()
- i = iter.head - 1 // it will be i++ soon
- case '[': // If open symbol, increase level
- level++
- if !iter.incrementDepth() {
- return
- }
- case ']': // If close symbol, increase level
- level--
- if !iter.decrementDepth() {
- return
- }
-
- // If we have returned to the original level, we're done
- if level == 0 {
- iter.head = i + 1
- return
- }
- }
- }
- if !iter.loadMore() {
- iter.ReportError("skipObject", "incomplete array")
- return
- }
- }
-}
-
-func (iter *Iterator) skipObject() {
- level := 1
- if !iter.incrementDepth() {
- return
- }
-
- for {
- for i := iter.head; i < iter.tail; i++ {
- switch iter.buf[i] {
- case '"': // If inside string, skip it
- iter.head = i + 1
- iter.skipString()
- i = iter.head - 1 // it will be i++ soon
- case '{': // If open symbol, increase level
- level++
- if !iter.incrementDepth() {
- return
- }
- case '}': // If close symbol, increase level
- level--
- if !iter.decrementDepth() {
- return
- }
-
- // If we have returned to the original level, we're done
- if level == 0 {
- iter.head = i + 1
- return
- }
- }
- }
- if !iter.loadMore() {
- iter.ReportError("skipObject", "incomplete object")
- return
- }
- }
-}
-
-func (iter *Iterator) skipString() {
- for {
- end, escaped := iter.findStringEnd()
- if end == -1 {
- if !iter.loadMore() {
- iter.ReportError("skipString", "incomplete string")
- return
- }
- if escaped {
- iter.head = 1 // skip the first char as last char read is \
- }
- } else {
- iter.head = end
- return
- }
- }
-}
-
-// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go
-// Tries to find the end of string
-// Support if string contains escaped quote symbols.
-func (iter *Iterator) findStringEnd() (int, bool) {
- escaped := false
- for i := iter.head; i < iter.tail; i++ {
- c := iter.buf[i]
- if c == '"' {
- if !escaped {
- return i + 1, false
- }
- j := i - 1
- for {
- if j < iter.head || iter.buf[j] != '\\' {
- // even number of backslashes
- // either end of buffer, or " found
- return i + 1, true
- }
- j--
- if j < iter.head || iter.buf[j] != '\\' {
- // odd number of backslashes
- // it is \" or \\\"
- break
- }
- j--
- }
- } else if c == '\\' {
- escaped = true
- }
- }
- j := iter.tail - 1
- for {
- if j < iter.head || iter.buf[j] != '\\' {
- // even number of backslashes
- // either end of buffer, or " found
- return -1, false // do not end with \
- }
- j--
- if j < iter.head || iter.buf[j] != '\\' {
- // odd number of backslashes
- // it is \" or \\\"
- break
- }
- j--
-
- }
- return -1, true // end with \
-}
diff --git a/vendor/github.com/json-iterator/go/iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go
deleted file mode 100644
index 6cf66d04..00000000
--- a/vendor/github.com/json-iterator/go/iter_skip_strict.go
+++ /dev/null
@@ -1,99 +0,0 @@
-//+build !jsoniter_sloppy
-
-package jsoniter
-
-import (
- "fmt"
- "io"
-)
-
-func (iter *Iterator) skipNumber() {
- if !iter.trySkipNumber() {
- iter.unreadByte()
- if iter.Error != nil && iter.Error != io.EOF {
- return
- }
- iter.ReadFloat64()
- if iter.Error != nil && iter.Error != io.EOF {
- iter.Error = nil
- iter.ReadBigFloat()
- }
- }
-}
-
-func (iter *Iterator) trySkipNumber() bool {
- dotFound := false
- for i := iter.head; i < iter.tail; i++ {
- c := iter.buf[i]
- switch c {
- case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
- case '.':
- if dotFound {
- iter.ReportError("validateNumber", `more than one dot found in number`)
- return true // already failed
- }
- if i+1 == iter.tail {
- return false
- }
- c = iter.buf[i+1]
- switch c {
- case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
- default:
- iter.ReportError("validateNumber", `missing digit after dot`)
- return true // already failed
- }
- dotFound = true
- default:
- switch c {
- case ',', ']', '}', ' ', '\t', '\n', '\r':
- if iter.head == i {
- return false // if - without following digits
- }
- iter.head = i
- return true // must be valid
- }
- return false // may be invalid
- }
- }
- return false
-}
-
-func (iter *Iterator) skipString() {
- if !iter.trySkipString() {
- iter.unreadByte()
- iter.ReadString()
- }
-}
-
-func (iter *Iterator) trySkipString() bool {
- for i := iter.head; i < iter.tail; i++ {
- c := iter.buf[i]
- if c == '"' {
- iter.head = i + 1
- return true // valid
- } else if c == '\\' {
- return false
- } else if c < ' ' {
- iter.ReportError("trySkipString",
- fmt.Sprintf(`invalid control character found: %d`, c))
- return true // already failed
- }
- }
- return false
-}
-
-func (iter *Iterator) skipObject() {
- iter.unreadByte()
- iter.ReadObjectCB(func(iter *Iterator, field string) bool {
- iter.Skip()
- return true
- })
-}
-
-func (iter *Iterator) skipArray() {
- iter.unreadByte()
- iter.ReadArrayCB(func(iter *Iterator) bool {
- iter.Skip()
- return true
- })
-}
diff --git a/vendor/github.com/json-iterator/go/iter_str.go b/vendor/github.com/json-iterator/go/iter_str.go
deleted file mode 100644
index adc487ea..00000000
--- a/vendor/github.com/json-iterator/go/iter_str.go
+++ /dev/null
@@ -1,215 +0,0 @@
-package jsoniter
-
-import (
- "fmt"
- "unicode/utf16"
-)
-
-// ReadString read string from iterator
-func (iter *Iterator) ReadString() (ret string) {
- c := iter.nextToken()
- if c == '"' {
- for i := iter.head; i < iter.tail; i++ {
- c := iter.buf[i]
- if c == '"' {
- ret = string(iter.buf[iter.head:i])
- iter.head = i + 1
- return ret
- } else if c == '\\' {
- break
- } else if c < ' ' {
- iter.ReportError("ReadString",
- fmt.Sprintf(`invalid control character found: %d`, c))
- return
- }
- }
- return iter.readStringSlowPath()
- } else if c == 'n' {
- iter.skipThreeBytes('u', 'l', 'l')
- return ""
- }
- iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c}))
- return
-}
-
-func (iter *Iterator) readStringSlowPath() (ret string) {
- var str []byte
- var c byte
- for iter.Error == nil {
- c = iter.readByte()
- if c == '"' {
- return string(str)
- }
- if c == '\\' {
- c = iter.readByte()
- str = iter.readEscapedChar(c, str)
- } else {
- str = append(str, c)
- }
- }
- iter.ReportError("readStringSlowPath", "unexpected end of input")
- return
-}
-
-func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte {
- switch c {
- case 'u':
- r := iter.readU4()
- if utf16.IsSurrogate(r) {
- c = iter.readByte()
- if iter.Error != nil {
- return nil
- }
- if c != '\\' {
- iter.unreadByte()
- str = appendRune(str, r)
- return str
- }
- c = iter.readByte()
- if iter.Error != nil {
- return nil
- }
- if c != 'u' {
- str = appendRune(str, r)
- return iter.readEscapedChar(c, str)
- }
- r2 := iter.readU4()
- if iter.Error != nil {
- return nil
- }
- combined := utf16.DecodeRune(r, r2)
- if combined == '\uFFFD' {
- str = appendRune(str, r)
- str = appendRune(str, r2)
- } else {
- str = appendRune(str, combined)
- }
- } else {
- str = appendRune(str, r)
- }
- case '"':
- str = append(str, '"')
- case '\\':
- str = append(str, '\\')
- case '/':
- str = append(str, '/')
- case 'b':
- str = append(str, '\b')
- case 'f':
- str = append(str, '\f')
- case 'n':
- str = append(str, '\n')
- case 'r':
- str = append(str, '\r')
- case 't':
- str = append(str, '\t')
- default:
- iter.ReportError("readEscapedChar",
- `invalid escape char after \`)
- return nil
- }
- return str
-}
-
-// ReadStringAsSlice read string from iterator without copying into string form.
-// The []byte can not be kept, as it will change after next iterator call.
-func (iter *Iterator) ReadStringAsSlice() (ret []byte) {
- c := iter.nextToken()
- if c == '"' {
- for i := iter.head; i < iter.tail; i++ {
- // require ascii string and no escape
- // for: field name, base64, number
- if iter.buf[i] == '"' {
- // fast path: reuse the underlying buffer
- ret = iter.buf[iter.head:i]
- iter.head = i + 1
- return ret
- }
- }
- readLen := iter.tail - iter.head
- copied := make([]byte, readLen, readLen*2)
- copy(copied, iter.buf[iter.head:iter.tail])
- iter.head = iter.tail
- for iter.Error == nil {
- c := iter.readByte()
- if c == '"' {
- return copied
- }
- copied = append(copied, c)
- }
- return copied
- }
- iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c}))
- return
-}
-
-func (iter *Iterator) readU4() (ret rune) {
- for i := 0; i < 4; i++ {
- c := iter.readByte()
- if iter.Error != nil {
- return
- }
- if c >= '0' && c <= '9' {
- ret = ret*16 + rune(c-'0')
- } else if c >= 'a' && c <= 'f' {
- ret = ret*16 + rune(c-'a'+10)
- } else if c >= 'A' && c <= 'F' {
- ret = ret*16 + rune(c-'A'+10)
- } else {
- iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c}))
- return
- }
- }
- return ret
-}
-
-const (
- t1 = 0x00 // 0000 0000
- tx = 0x80 // 1000 0000
- t2 = 0xC0 // 1100 0000
- t3 = 0xE0 // 1110 0000
- t4 = 0xF0 // 1111 0000
- t5 = 0xF8 // 1111 1000
-
- maskx = 0x3F // 0011 1111
- mask2 = 0x1F // 0001 1111
- mask3 = 0x0F // 0000 1111
- mask4 = 0x07 // 0000 0111
-
- rune1Max = 1<<7 - 1
- rune2Max = 1<<11 - 1
- rune3Max = 1<<16 - 1
-
- surrogateMin = 0xD800
- surrogateMax = 0xDFFF
-
- maxRune = '\U0010FFFF' // Maximum valid Unicode code point.
- runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character"
-)
-
-func appendRune(p []byte, r rune) []byte {
- // Negative values are erroneous. Making it unsigned addresses the problem.
- switch i := uint32(r); {
- case i <= rune1Max:
- p = append(p, byte(r))
- return p
- case i <= rune2Max:
- p = append(p, t2|byte(r>>6))
- p = append(p, tx|byte(r)&maskx)
- return p
- case i > maxRune, surrogateMin <= i && i <= surrogateMax:
- r = runeError
- fallthrough
- case i <= rune3Max:
- p = append(p, t3|byte(r>>12))
- p = append(p, tx|byte(r>>6)&maskx)
- p = append(p, tx|byte(r)&maskx)
- return p
- default:
- p = append(p, t4|byte(r>>18))
- p = append(p, tx|byte(r>>12)&maskx)
- p = append(p, tx|byte(r>>6)&maskx)
- p = append(p, tx|byte(r)&maskx)
- return p
- }
-}
diff --git a/vendor/github.com/json-iterator/go/jsoniter.go b/vendor/github.com/json-iterator/go/jsoniter.go
deleted file mode 100644
index c2934f91..00000000
--- a/vendor/github.com/json-iterator/go/jsoniter.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Package jsoniter implements encoding and decoding of JSON as defined in
-// RFC 4627 and provides interfaces with identical syntax of standard lib encoding/json.
-// Converting from encoding/json to jsoniter is no more than replacing the package with jsoniter
-// and variable type declarations (if any).
-// jsoniter interfaces gives 100% compatibility with code using standard lib.
-//
-// "JSON and Go"
-// (https://golang.org/doc/articles/json_and_go.html)
-// gives a description of how Marshal/Unmarshal operate
-// between arbitrary or predefined json objects and bytes,
-// and it applies to jsoniter.Marshal/Unmarshal as well.
-//
-// Besides, jsoniter.Iterator provides a different set of interfaces
-// iterating given bytes/string/reader
-// and yielding parsed elements one by one.
-// This set of interfaces reads input as required and gives
-// better performance.
-package jsoniter
diff --git a/vendor/github.com/json-iterator/go/pool.go b/vendor/github.com/json-iterator/go/pool.go
deleted file mode 100644
index e2389b56..00000000
--- a/vendor/github.com/json-iterator/go/pool.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package jsoniter
-
-import (
- "io"
-)
-
-// IteratorPool a thread safe pool of iterators with same configuration
-type IteratorPool interface {
- BorrowIterator(data []byte) *Iterator
- ReturnIterator(iter *Iterator)
-}
-
-// StreamPool a thread safe pool of streams with same configuration
-type StreamPool interface {
- BorrowStream(writer io.Writer) *Stream
- ReturnStream(stream *Stream)
-}
-
-func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream {
- stream := cfg.streamPool.Get().(*Stream)
- stream.Reset(writer)
- return stream
-}
-
-func (cfg *frozenConfig) ReturnStream(stream *Stream) {
- stream.out = nil
- stream.Error = nil
- stream.Attachment = nil
- cfg.streamPool.Put(stream)
-}
-
-func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator {
- iter := cfg.iteratorPool.Get().(*Iterator)
- iter.ResetBytes(data)
- return iter
-}
-
-func (cfg *frozenConfig) ReturnIterator(iter *Iterator) {
- iter.Error = nil
- iter.Attachment = nil
- cfg.iteratorPool.Put(iter)
-}
diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go
deleted file mode 100644
index 39acb320..00000000
--- a/vendor/github.com/json-iterator/go/reflect.go
+++ /dev/null
@@ -1,337 +0,0 @@
-package jsoniter
-
-import (
- "fmt"
- "reflect"
- "unsafe"
-
- "github.com/modern-go/reflect2"
-)
-
-// ValDecoder is an internal type registered to cache as needed.
-// Don't confuse jsoniter.ValDecoder with json.Decoder.
-// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link).
-//
-// Reflection on type to create decoders, which is then cached
-// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions
-// 1. create instance of new value, for example *int will need a int to be allocated
-// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New
-// 3. assignment to map, both key and value will be reflect.Value
-// For a simple struct binding, it will be reflect.Value free and allocation free
-type ValDecoder interface {
- Decode(ptr unsafe.Pointer, iter *Iterator)
-}
-
-// ValEncoder is an internal type registered to cache as needed.
-// Don't confuse jsoniter.ValEncoder with json.Encoder.
-// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link).
-type ValEncoder interface {
- IsEmpty(ptr unsafe.Pointer) bool
- Encode(ptr unsafe.Pointer, stream *Stream)
-}
-
-type checkIsEmpty interface {
- IsEmpty(ptr unsafe.Pointer) bool
-}
-
-type ctx struct {
- *frozenConfig
- prefix string
- encoders map[reflect2.Type]ValEncoder
- decoders map[reflect2.Type]ValDecoder
-}
-
-func (b *ctx) caseSensitive() bool {
- if b.frozenConfig == nil {
- // default is case-insensitive
- return false
- }
- return b.frozenConfig.caseSensitive
-}
-
-func (b *ctx) append(prefix string) *ctx {
- return &ctx{
- frozenConfig: b.frozenConfig,
- prefix: b.prefix + " " + prefix,
- encoders: b.encoders,
- decoders: b.decoders,
- }
-}
-
-// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal
-func (iter *Iterator) ReadVal(obj interface{}) {
- depth := iter.depth
- cacheKey := reflect2.RTypeOf(obj)
- decoder := iter.cfg.getDecoderFromCache(cacheKey)
- if decoder == nil {
- typ := reflect2.TypeOf(obj)
- if typ == nil || typ.Kind() != reflect.Ptr {
- iter.ReportError("ReadVal", "can only unmarshal into pointer")
- return
- }
- decoder = iter.cfg.DecoderOf(typ)
- }
- ptr := reflect2.PtrOf(obj)
- if ptr == nil {
- iter.ReportError("ReadVal", "can not read into nil pointer")
- return
- }
- decoder.Decode(ptr, iter)
- if iter.depth != depth {
- iter.ReportError("ReadVal", "unexpected mismatched nesting")
- return
- }
-}
-
-// WriteVal copy the go interface into underlying JSON, same as json.Marshal
-func (stream *Stream) WriteVal(val interface{}) {
- if nil == val {
- stream.WriteNil()
- return
- }
- cacheKey := reflect2.RTypeOf(val)
- encoder := stream.cfg.getEncoderFromCache(cacheKey)
- if encoder == nil {
- typ := reflect2.TypeOf(val)
- encoder = stream.cfg.EncoderOf(typ)
- }
- encoder.Encode(reflect2.PtrOf(val), stream)
-}
-
-func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder {
- cacheKey := typ.RType()
- decoder := cfg.getDecoderFromCache(cacheKey)
- if decoder != nil {
- return decoder
- }
- ctx := &ctx{
- frozenConfig: cfg,
- prefix: "",
- decoders: map[reflect2.Type]ValDecoder{},
- encoders: map[reflect2.Type]ValEncoder{},
- }
- ptrType := typ.(*reflect2.UnsafePtrType)
- decoder = decoderOfType(ctx, ptrType.Elem())
- cfg.addDecoderToCache(cacheKey, decoder)
- return decoder
-}
-
-func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
- decoder := getTypeDecoderFromExtension(ctx, typ)
- if decoder != nil {
- return decoder
- }
- decoder = createDecoderOfType(ctx, typ)
- for _, extension := range extensions {
- decoder = extension.DecorateDecoder(typ, decoder)
- }
- decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
- for _, extension := range ctx.extraExtensions {
- decoder = extension.DecorateDecoder(typ, decoder)
- }
- return decoder
-}
-
-func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
- decoder := ctx.decoders[typ]
- if decoder != nil {
- return decoder
- }
- placeholder := &placeholderDecoder{}
- ctx.decoders[typ] = placeholder
- decoder = _createDecoderOfType(ctx, typ)
- placeholder.decoder = decoder
- return decoder
-}
-
-func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
- decoder := createDecoderOfJsonRawMessage(ctx, typ)
- if decoder != nil {
- return decoder
- }
- decoder = createDecoderOfJsonNumber(ctx, typ)
- if decoder != nil {
- return decoder
- }
- decoder = createDecoderOfMarshaler(ctx, typ)
- if decoder != nil {
- return decoder
- }
- decoder = createDecoderOfAny(ctx, typ)
- if decoder != nil {
- return decoder
- }
- decoder = createDecoderOfNative(ctx, typ)
- if decoder != nil {
- return decoder
- }
- switch typ.Kind() {
- case reflect.Interface:
- ifaceType, isIFace := typ.(*reflect2.UnsafeIFaceType)
- if isIFace {
- return &ifaceDecoder{valType: ifaceType}
- }
- return &efaceDecoder{}
- case reflect.Struct:
- return decoderOfStruct(ctx, typ)
- case reflect.Array:
- return decoderOfArray(ctx, typ)
- case reflect.Slice:
- return decoderOfSlice(ctx, typ)
- case reflect.Map:
- return decoderOfMap(ctx, typ)
- case reflect.Ptr:
- return decoderOfOptional(ctx, typ)
- default:
- return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())}
- }
-}
-
-func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder {
- cacheKey := typ.RType()
- encoder := cfg.getEncoderFromCache(cacheKey)
- if encoder != nil {
- return encoder
- }
- ctx := &ctx{
- frozenConfig: cfg,
- prefix: "",
- decoders: map[reflect2.Type]ValDecoder{},
- encoders: map[reflect2.Type]ValEncoder{},
- }
- encoder = encoderOfType(ctx, typ)
- if typ.LikePtr() {
- encoder = &onePtrEncoder{encoder}
- }
- cfg.addEncoderToCache(cacheKey, encoder)
- return encoder
-}
-
-type onePtrEncoder struct {
- encoder ValEncoder
-}
-
-func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr))
-}
-
-func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- encoder.encoder.Encode(unsafe.Pointer(&ptr), stream)
-}
-
-func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
- encoder := getTypeEncoderFromExtension(ctx, typ)
- if encoder != nil {
- return encoder
- }
- encoder = createEncoderOfType(ctx, typ)
- for _, extension := range extensions {
- encoder = extension.DecorateEncoder(typ, encoder)
- }
- encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
- for _, extension := range ctx.extraExtensions {
- encoder = extension.DecorateEncoder(typ, encoder)
- }
- return encoder
-}
-
-func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
- encoder := ctx.encoders[typ]
- if encoder != nil {
- return encoder
- }
- placeholder := &placeholderEncoder{}
- ctx.encoders[typ] = placeholder
- encoder = _createEncoderOfType(ctx, typ)
- placeholder.encoder = encoder
- return encoder
-}
-func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
- encoder := createEncoderOfJsonRawMessage(ctx, typ)
- if encoder != nil {
- return encoder
- }
- encoder = createEncoderOfJsonNumber(ctx, typ)
- if encoder != nil {
- return encoder
- }
- encoder = createEncoderOfMarshaler(ctx, typ)
- if encoder != nil {
- return encoder
- }
- encoder = createEncoderOfAny(ctx, typ)
- if encoder != nil {
- return encoder
- }
- encoder = createEncoderOfNative(ctx, typ)
- if encoder != nil {
- return encoder
- }
- kind := typ.Kind()
- switch kind {
- case reflect.Interface:
- return &dynamicEncoder{typ}
- case reflect.Struct:
- return encoderOfStruct(ctx, typ)
- case reflect.Array:
- return encoderOfArray(ctx, typ)
- case reflect.Slice:
- return encoderOfSlice(ctx, typ)
- case reflect.Map:
- return encoderOfMap(ctx, typ)
- case reflect.Ptr:
- return encoderOfOptional(ctx, typ)
- default:
- return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())}
- }
-}
-
-type lazyErrorDecoder struct {
- err error
-}
-
-func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if iter.WhatIsNext() != NilValue {
- if iter.Error == nil {
- iter.Error = decoder.err
- }
- } else {
- iter.Skip()
- }
-}
-
-type lazyErrorEncoder struct {
- err error
-}
-
-func (encoder *lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- if ptr == nil {
- stream.WriteNil()
- } else if stream.Error == nil {
- stream.Error = encoder.err
- }
-}
-
-func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return false
-}
-
-type placeholderDecoder struct {
- decoder ValDecoder
-}
-
-func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- decoder.decoder.Decode(ptr, iter)
-}
-
-type placeholderEncoder struct {
- encoder ValEncoder
-}
-
-func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- encoder.encoder.Encode(ptr, stream)
-}
-
-func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return encoder.encoder.IsEmpty(ptr)
-}
diff --git a/vendor/github.com/json-iterator/go/reflect_array.go b/vendor/github.com/json-iterator/go/reflect_array.go
deleted file mode 100644
index 13a0b7b0..00000000
--- a/vendor/github.com/json-iterator/go/reflect_array.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package jsoniter
-
-import (
- "fmt"
- "github.com/modern-go/reflect2"
- "io"
- "unsafe"
-)
-
-func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder {
- arrayType := typ.(*reflect2.UnsafeArrayType)
- decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem())
- return &arrayDecoder{arrayType, decoder}
-}
-
-func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder {
- arrayType := typ.(*reflect2.UnsafeArrayType)
- if arrayType.Len() == 0 {
- return emptyArrayEncoder{}
- }
- encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem())
- return &arrayEncoder{arrayType, encoder}
-}
-
-type emptyArrayEncoder struct{}
-
-func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteEmptyArray()
-}
-
-func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return true
-}
-
-type arrayEncoder struct {
- arrayType *reflect2.UnsafeArrayType
- elemEncoder ValEncoder
-}
-
-func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteArrayStart()
- elemPtr := unsafe.Pointer(ptr)
- encoder.elemEncoder.Encode(elemPtr, stream)
- for i := 1; i < encoder.arrayType.Len(); i++ {
- stream.WriteMore()
- elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i)
- encoder.elemEncoder.Encode(elemPtr, stream)
- }
- stream.WriteArrayEnd()
- if stream.Error != nil && stream.Error != io.EOF {
- stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error())
- }
-}
-
-func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return false
-}
-
-type arrayDecoder struct {
- arrayType *reflect2.UnsafeArrayType
- elemDecoder ValDecoder
-}
-
-func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- decoder.doDecode(ptr, iter)
- if iter.Error != nil && iter.Error != io.EOF {
- iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error())
- }
-}
-
-func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
- c := iter.nextToken()
- arrayType := decoder.arrayType
- if c == 'n' {
- iter.skipThreeBytes('u', 'l', 'l')
- return
- }
- if c != '[' {
- iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c}))
- return
- }
- c = iter.nextToken()
- if c == ']' {
- return
- }
- iter.unreadByte()
- elemPtr := arrayType.UnsafeGetIndex(ptr, 0)
- decoder.elemDecoder.Decode(elemPtr, iter)
- length := 1
- for c = iter.nextToken(); c == ','; c = iter.nextToken() {
- if length >= arrayType.Len() {
- iter.Skip()
- continue
- }
- idx := length
- length += 1
- elemPtr = arrayType.UnsafeGetIndex(ptr, idx)
- decoder.elemDecoder.Decode(elemPtr, iter)
- }
- if c != ']' {
- iter.ReportError("decode array", "expect ], but found "+string([]byte{c}))
- return
- }
-}
diff --git a/vendor/github.com/json-iterator/go/reflect_dynamic.go b/vendor/github.com/json-iterator/go/reflect_dynamic.go
deleted file mode 100644
index 8b6bc8b4..00000000
--- a/vendor/github.com/json-iterator/go/reflect_dynamic.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package jsoniter
-
-import (
- "github.com/modern-go/reflect2"
- "reflect"
- "unsafe"
-)
-
-type dynamicEncoder struct {
- valType reflect2.Type
-}
-
-func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- obj := encoder.valType.UnsafeIndirect(ptr)
- stream.WriteVal(obj)
-}
-
-func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return encoder.valType.UnsafeIndirect(ptr) == nil
-}
-
-type efaceDecoder struct {
-}
-
-func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- pObj := (*interface{})(ptr)
- obj := *pObj
- if obj == nil {
- *pObj = iter.Read()
- return
- }
- typ := reflect2.TypeOf(obj)
- if typ.Kind() != reflect.Ptr {
- *pObj = iter.Read()
- return
- }
- ptrType := typ.(*reflect2.UnsafePtrType)
- ptrElemType := ptrType.Elem()
- if iter.WhatIsNext() == NilValue {
- if ptrElemType.Kind() != reflect.Ptr {
- iter.skipFourBytes('n', 'u', 'l', 'l')
- *pObj = nil
- return
- }
- }
- if reflect2.IsNil(obj) {
- obj := ptrElemType.New()
- iter.ReadVal(obj)
- *pObj = obj
- return
- }
- iter.ReadVal(obj)
-}
-
-type ifaceDecoder struct {
- valType *reflect2.UnsafeIFaceType
-}
-
-func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if iter.ReadNil() {
- decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew())
- return
- }
- obj := decoder.valType.UnsafeIndirect(ptr)
- if reflect2.IsNil(obj) {
- iter.ReportError("decode non empty interface", "can not unmarshal into nil")
- return
- }
- iter.ReadVal(obj)
-}
diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go
deleted file mode 100644
index 74a97bfe..00000000
--- a/vendor/github.com/json-iterator/go/reflect_extension.go
+++ /dev/null
@@ -1,483 +0,0 @@
-package jsoniter
-
-import (
- "fmt"
- "github.com/modern-go/reflect2"
- "reflect"
- "sort"
- "strings"
- "unicode"
- "unsafe"
-)
-
-var typeDecoders = map[string]ValDecoder{}
-var fieldDecoders = map[string]ValDecoder{}
-var typeEncoders = map[string]ValEncoder{}
-var fieldEncoders = map[string]ValEncoder{}
-var extensions = []Extension{}
-
-// StructDescriptor describe how should we encode/decode the struct
-type StructDescriptor struct {
- Type reflect2.Type
- Fields []*Binding
-}
-
-// GetField get one field from the descriptor by its name.
-// Can not use map here to keep field orders.
-func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding {
- for _, binding := range structDescriptor.Fields {
- if binding.Field.Name() == fieldName {
- return binding
- }
- }
- return nil
-}
-
-// Binding describe how should we encode/decode the struct field
-type Binding struct {
- levels []int
- Field reflect2.StructField
- FromNames []string
- ToNames []string
- Encoder ValEncoder
- Decoder ValDecoder
-}
-
-// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder.
-// Can also rename fields by UpdateStructDescriptor.
-type Extension interface {
- UpdateStructDescriptor(structDescriptor *StructDescriptor)
- CreateMapKeyDecoder(typ reflect2.Type) ValDecoder
- CreateMapKeyEncoder(typ reflect2.Type) ValEncoder
- CreateDecoder(typ reflect2.Type) ValDecoder
- CreateEncoder(typ reflect2.Type) ValEncoder
- DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder
- DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder
-}
-
-// DummyExtension embed this type get dummy implementation for all methods of Extension
-type DummyExtension struct {
-}
-
-// UpdateStructDescriptor No-op
-func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
-}
-
-// CreateMapKeyDecoder No-op
-func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
- return nil
-}
-
-// CreateMapKeyEncoder No-op
-func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
- return nil
-}
-
-// CreateDecoder No-op
-func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
- return nil
-}
-
-// CreateEncoder No-op
-func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
- return nil
-}
-
-// DecorateDecoder No-op
-func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
- return decoder
-}
-
-// DecorateEncoder No-op
-func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
- return encoder
-}
-
-type EncoderExtension map[reflect2.Type]ValEncoder
-
-// UpdateStructDescriptor No-op
-func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
-}
-
-// CreateDecoder No-op
-func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
- return nil
-}
-
-// CreateEncoder get encoder from map
-func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
- return extension[typ]
-}
-
-// CreateMapKeyDecoder No-op
-func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
- return nil
-}
-
-// CreateMapKeyEncoder No-op
-func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
- return nil
-}
-
-// DecorateDecoder No-op
-func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
- return decoder
-}
-
-// DecorateEncoder No-op
-func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
- return encoder
-}
-
-type DecoderExtension map[reflect2.Type]ValDecoder
-
-// UpdateStructDescriptor No-op
-func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
-}
-
-// CreateMapKeyDecoder No-op
-func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
- return nil
-}
-
-// CreateMapKeyEncoder No-op
-func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
- return nil
-}
-
-// CreateDecoder get decoder from map
-func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
- return extension[typ]
-}
-
-// CreateEncoder No-op
-func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
- return nil
-}
-
-// DecorateDecoder No-op
-func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
- return decoder
-}
-
-// DecorateEncoder No-op
-func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
- return encoder
-}
-
-type funcDecoder struct {
- fun DecoderFunc
-}
-
-func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- decoder.fun(ptr, iter)
-}
-
-type funcEncoder struct {
- fun EncoderFunc
- isEmptyFunc func(ptr unsafe.Pointer) bool
-}
-
-func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- encoder.fun(ptr, stream)
-}
-
-func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- if encoder.isEmptyFunc == nil {
- return false
- }
- return encoder.isEmptyFunc(ptr)
-}
-
-// DecoderFunc the function form of TypeDecoder
-type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator)
-
-// EncoderFunc the function form of TypeEncoder
-type EncoderFunc func(ptr unsafe.Pointer, stream *Stream)
-
-// RegisterTypeDecoderFunc register TypeDecoder for a type with function
-func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) {
- typeDecoders[typ] = &funcDecoder{fun}
-}
-
-// RegisterTypeDecoder register TypeDecoder for a typ
-func RegisterTypeDecoder(typ string, decoder ValDecoder) {
- typeDecoders[typ] = decoder
-}
-
-// RegisterFieldDecoderFunc register TypeDecoder for a struct field with function
-func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) {
- RegisterFieldDecoder(typ, field, &funcDecoder{fun})
-}
-
-// RegisterFieldDecoder register TypeDecoder for a struct field
-func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) {
- fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder
-}
-
-// RegisterTypeEncoderFunc register TypeEncoder for a type with encode/isEmpty function
-func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {
- typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc}
-}
-
-// RegisterTypeEncoder register TypeEncoder for a type
-func RegisterTypeEncoder(typ string, encoder ValEncoder) {
- typeEncoders[typ] = encoder
-}
-
-// RegisterFieldEncoderFunc register TypeEncoder for a struct field with encode/isEmpty function
-func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {
- RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc})
-}
-
-// RegisterFieldEncoder register TypeEncoder for a struct field
-func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) {
- fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder
-}
-
-// RegisterExtension register extension
-func RegisterExtension(extension Extension) {
- extensions = append(extensions, extension)
-}
-
-func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
- decoder := _getTypeDecoderFromExtension(ctx, typ)
- if decoder != nil {
- for _, extension := range extensions {
- decoder = extension.DecorateDecoder(typ, decoder)
- }
- decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
- for _, extension := range ctx.extraExtensions {
- decoder = extension.DecorateDecoder(typ, decoder)
- }
- }
- return decoder
-}
-func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
- for _, extension := range extensions {
- decoder := extension.CreateDecoder(typ)
- if decoder != nil {
- return decoder
- }
- }
- decoder := ctx.decoderExtension.CreateDecoder(typ)
- if decoder != nil {
- return decoder
- }
- for _, extension := range ctx.extraExtensions {
- decoder := extension.CreateDecoder(typ)
- if decoder != nil {
- return decoder
- }
- }
- typeName := typ.String()
- decoder = typeDecoders[typeName]
- if decoder != nil {
- return decoder
- }
- if typ.Kind() == reflect.Ptr {
- ptrType := typ.(*reflect2.UnsafePtrType)
- decoder := typeDecoders[ptrType.Elem().String()]
- if decoder != nil {
- return &OptionalDecoder{ptrType.Elem(), decoder}
- }
- }
- return nil
-}
-
-func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
- encoder := _getTypeEncoderFromExtension(ctx, typ)
- if encoder != nil {
- for _, extension := range extensions {
- encoder = extension.DecorateEncoder(typ, encoder)
- }
- encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
- for _, extension := range ctx.extraExtensions {
- encoder = extension.DecorateEncoder(typ, encoder)
- }
- }
- return encoder
-}
-
-func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
- for _, extension := range extensions {
- encoder := extension.CreateEncoder(typ)
- if encoder != nil {
- return encoder
- }
- }
- encoder := ctx.encoderExtension.CreateEncoder(typ)
- if encoder != nil {
- return encoder
- }
- for _, extension := range ctx.extraExtensions {
- encoder := extension.CreateEncoder(typ)
- if encoder != nil {
- return encoder
- }
- }
- typeName := typ.String()
- encoder = typeEncoders[typeName]
- if encoder != nil {
- return encoder
- }
- if typ.Kind() == reflect.Ptr {
- typePtr := typ.(*reflect2.UnsafePtrType)
- encoder := typeEncoders[typePtr.Elem().String()]
- if encoder != nil {
- return &OptionalEncoder{encoder}
- }
- }
- return nil
-}
-
-func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor {
- structType := typ.(*reflect2.UnsafeStructType)
- embeddedBindings := []*Binding{}
- bindings := []*Binding{}
- for i := 0; i < structType.NumField(); i++ {
- field := structType.Field(i)
- tag, hastag := field.Tag().Lookup(ctx.getTagKey())
- if ctx.onlyTaggedField && !hastag && !field.Anonymous() {
- continue
- }
- if tag == "-" || field.Name() == "_" {
- continue
- }
- tagParts := strings.Split(tag, ",")
- if field.Anonymous() && (tag == "" || tagParts[0] == "") {
- if field.Type().Kind() == reflect.Struct {
- structDescriptor := describeStruct(ctx, field.Type())
- for _, binding := range structDescriptor.Fields {
- binding.levels = append([]int{i}, binding.levels...)
- omitempty := binding.Encoder.(*structFieldEncoder).omitempty
- binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty}
- binding.Decoder = &structFieldDecoder{field, binding.Decoder}
- embeddedBindings = append(embeddedBindings, binding)
- }
- continue
- } else if field.Type().Kind() == reflect.Ptr {
- ptrType := field.Type().(*reflect2.UnsafePtrType)
- if ptrType.Elem().Kind() == reflect.Struct {
- structDescriptor := describeStruct(ctx, ptrType.Elem())
- for _, binding := range structDescriptor.Fields {
- binding.levels = append([]int{i}, binding.levels...)
- omitempty := binding.Encoder.(*structFieldEncoder).omitempty
- binding.Encoder = &dereferenceEncoder{binding.Encoder}
- binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty}
- binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder}
- binding.Decoder = &structFieldDecoder{field, binding.Decoder}
- embeddedBindings = append(embeddedBindings, binding)
- }
- continue
- }
- }
- }
- fieldNames := calcFieldNames(field.Name(), tagParts[0], tag)
- fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name())
- decoder := fieldDecoders[fieldCacheKey]
- if decoder == nil {
- decoder = decoderOfType(ctx.append(field.Name()), field.Type())
- }
- encoder := fieldEncoders[fieldCacheKey]
- if encoder == nil {
- encoder = encoderOfType(ctx.append(field.Name()), field.Type())
- }
- binding := &Binding{
- Field: field,
- FromNames: fieldNames,
- ToNames: fieldNames,
- Decoder: decoder,
- Encoder: encoder,
- }
- binding.levels = []int{i}
- bindings = append(bindings, binding)
- }
- return createStructDescriptor(ctx, typ, bindings, embeddedBindings)
-}
-func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor {
- structDescriptor := &StructDescriptor{
- Type: typ,
- Fields: bindings,
- }
- for _, extension := range extensions {
- extension.UpdateStructDescriptor(structDescriptor)
- }
- ctx.encoderExtension.UpdateStructDescriptor(structDescriptor)
- ctx.decoderExtension.UpdateStructDescriptor(structDescriptor)
- for _, extension := range ctx.extraExtensions {
- extension.UpdateStructDescriptor(structDescriptor)
- }
- processTags(structDescriptor, ctx.frozenConfig)
- // merge normal & embedded bindings & sort with original order
- allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...))
- sort.Sort(allBindings)
- structDescriptor.Fields = allBindings
- return structDescriptor
-}
-
-type sortableBindings []*Binding
-
-func (bindings sortableBindings) Len() int {
- return len(bindings)
-}
-
-func (bindings sortableBindings) Less(i, j int) bool {
- left := bindings[i].levels
- right := bindings[j].levels
- k := 0
- for {
- if left[k] < right[k] {
- return true
- } else if left[k] > right[k] {
- return false
- }
- k++
- }
-}
-
-func (bindings sortableBindings) Swap(i, j int) {
- bindings[i], bindings[j] = bindings[j], bindings[i]
-}
-
-func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) {
- for _, binding := range structDescriptor.Fields {
- shouldOmitEmpty := false
- tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",")
- for _, tagPart := range tagParts[1:] {
- if tagPart == "omitempty" {
- shouldOmitEmpty = true
- } else if tagPart == "string" {
- if binding.Field.Type().Kind() == reflect.String {
- binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg}
- binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg}
- } else {
- binding.Decoder = &stringModeNumberDecoder{binding.Decoder}
- binding.Encoder = &stringModeNumberEncoder{binding.Encoder}
- }
- }
- }
- binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder}
- binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty}
- }
-}
-
-func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string {
- // ignore?
- if wholeTag == "-" {
- return []string{}
- }
- // rename?
- var fieldNames []string
- if tagProvidedFieldName == "" {
- fieldNames = []string{originalFieldName}
- } else {
- fieldNames = []string{tagProvidedFieldName}
- }
- // private?
- isNotExported := unicode.IsLower(rune(originalFieldName[0])) || originalFieldName[0] == '_'
- if isNotExported {
- fieldNames = []string{}
- }
- return fieldNames
-}
diff --git a/vendor/github.com/json-iterator/go/reflect_json_number.go b/vendor/github.com/json-iterator/go/reflect_json_number.go
deleted file mode 100644
index 98d45c1e..00000000
--- a/vendor/github.com/json-iterator/go/reflect_json_number.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package jsoniter
-
-import (
- "encoding/json"
- "github.com/modern-go/reflect2"
- "strconv"
- "unsafe"
-)
-
-type Number string
-
-// String returns the literal text of the number.
-func (n Number) String() string { return string(n) }
-
-// Float64 returns the number as a float64.
-func (n Number) Float64() (float64, error) {
- return strconv.ParseFloat(string(n), 64)
-}
-
-// Int64 returns the number as an int64.
-func (n Number) Int64() (int64, error) {
- return strconv.ParseInt(string(n), 10, 64)
-}
-
-func CastJsonNumber(val interface{}) (string, bool) {
- switch typedVal := val.(type) {
- case json.Number:
- return string(typedVal), true
- case Number:
- return string(typedVal), true
- }
- return "", false
-}
-
-var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem()
-var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem()
-
-func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder {
- if typ.AssignableTo(jsonNumberType) {
- return &jsonNumberCodec{}
- }
- if typ.AssignableTo(jsoniterNumberType) {
- return &jsoniterNumberCodec{}
- }
- return nil
-}
-
-func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder {
- if typ.AssignableTo(jsonNumberType) {
- return &jsonNumberCodec{}
- }
- if typ.AssignableTo(jsoniterNumberType) {
- return &jsoniterNumberCodec{}
- }
- return nil
-}
-
-type jsonNumberCodec struct {
-}
-
-func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- switch iter.WhatIsNext() {
- case StringValue:
- *((*json.Number)(ptr)) = json.Number(iter.ReadString())
- case NilValue:
- iter.skipFourBytes('n', 'u', 'l', 'l')
- *((*json.Number)(ptr)) = ""
- default:
- *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString()))
- }
-}
-
-func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
- number := *((*json.Number)(ptr))
- if len(number) == 0 {
- stream.writeByte('0')
- } else {
- stream.WriteRaw(string(number))
- }
-}
-
-func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
- return len(*((*json.Number)(ptr))) == 0
-}
-
-type jsoniterNumberCodec struct {
-}
-
-func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- switch iter.WhatIsNext() {
- case StringValue:
- *((*Number)(ptr)) = Number(iter.ReadString())
- case NilValue:
- iter.skipFourBytes('n', 'u', 'l', 'l')
- *((*Number)(ptr)) = ""
- default:
- *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString()))
- }
-}
-
-func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
- number := *((*Number)(ptr))
- if len(number) == 0 {
- stream.writeByte('0')
- } else {
- stream.WriteRaw(string(number))
- }
-}
-
-func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
- return len(*((*Number)(ptr))) == 0
-}
diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
deleted file mode 100644
index eba434f2..00000000
--- a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package jsoniter
-
-import (
- "encoding/json"
- "github.com/modern-go/reflect2"
- "unsafe"
-)
-
-var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()
-var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()
-
-func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder {
- if typ == jsonRawMessageType {
- return &jsonRawMessageCodec{}
- }
- if typ == jsoniterRawMessageType {
- return &jsoniterRawMessageCodec{}
- }
- return nil
-}
-
-func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder {
- if typ == jsonRawMessageType {
- return &jsonRawMessageCodec{}
- }
- if typ == jsoniterRawMessageType {
- return &jsoniterRawMessageCodec{}
- }
- return nil
-}
-
-type jsonRawMessageCodec struct {
-}
-
-func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if iter.ReadNil() {
- *((*json.RawMessage)(ptr)) = nil
- } else {
- *((*json.RawMessage)(ptr)) = iter.SkipAndReturnBytes()
- }
-}
-
-func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
- if *((*json.RawMessage)(ptr)) == nil {
- stream.WriteNil()
- } else {
- stream.WriteRaw(string(*((*json.RawMessage)(ptr))))
- }
-}
-
-func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
- return len(*((*json.RawMessage)(ptr))) == 0
-}
-
-type jsoniterRawMessageCodec struct {
-}
-
-func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if iter.ReadNil() {
- *((*RawMessage)(ptr)) = nil
- } else {
- *((*RawMessage)(ptr)) = iter.SkipAndReturnBytes()
- }
-}
-
-func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
- if *((*RawMessage)(ptr)) == nil {
- stream.WriteNil()
- } else {
- stream.WriteRaw(string(*((*RawMessage)(ptr))))
- }
-}
-
-func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
- return len(*((*RawMessage)(ptr))) == 0
-}
diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go
deleted file mode 100644
index 58296713..00000000
--- a/vendor/github.com/json-iterator/go/reflect_map.go
+++ /dev/null
@@ -1,346 +0,0 @@
-package jsoniter
-
-import (
- "fmt"
- "github.com/modern-go/reflect2"
- "io"
- "reflect"
- "sort"
- "unsafe"
-)
-
-func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder {
- mapType := typ.(*reflect2.UnsafeMapType)
- keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key())
- elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem())
- return &mapDecoder{
- mapType: mapType,
- keyType: mapType.Key(),
- elemType: mapType.Elem(),
- keyDecoder: keyDecoder,
- elemDecoder: elemDecoder,
- }
-}
-
-func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder {
- mapType := typ.(*reflect2.UnsafeMapType)
- if ctx.sortMapKeys {
- return &sortKeysMapEncoder{
- mapType: mapType,
- keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()),
- elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()),
- }
- }
- return &mapEncoder{
- mapType: mapType,
- keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()),
- elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()),
- }
-}
-
-func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
- decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ)
- if decoder != nil {
- return decoder
- }
- for _, extension := range ctx.extraExtensions {
- decoder := extension.CreateMapKeyDecoder(typ)
- if decoder != nil {
- return decoder
- }
- }
-
- ptrType := reflect2.PtrTo(typ)
- if ptrType.Implements(unmarshalerType) {
- return &referenceDecoder{
- &unmarshalerDecoder{
- valType: ptrType,
- },
- }
- }
- if typ.Implements(unmarshalerType) {
- return &unmarshalerDecoder{
- valType: typ,
- }
- }
- if ptrType.Implements(textUnmarshalerType) {
- return &referenceDecoder{
- &textUnmarshalerDecoder{
- valType: ptrType,
- },
- }
- }
- if typ.Implements(textUnmarshalerType) {
- return &textUnmarshalerDecoder{
- valType: typ,
- }
- }
-
- switch typ.Kind() {
- case reflect.String:
- return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
- case reflect.Bool,
- reflect.Uint8, reflect.Int8,
- reflect.Uint16, reflect.Int16,
- reflect.Uint32, reflect.Int32,
- reflect.Uint64, reflect.Int64,
- reflect.Uint, reflect.Int,
- reflect.Float32, reflect.Float64,
- reflect.Uintptr:
- typ = reflect2.DefaultTypeOfKind(typ.Kind())
- return &numericMapKeyDecoder{decoderOfType(ctx, typ)}
- default:
- return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
- }
-}
-
-func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder {
- encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ)
- if encoder != nil {
- return encoder
- }
- for _, extension := range ctx.extraExtensions {
- encoder := extension.CreateMapKeyEncoder(typ)
- if encoder != nil {
- return encoder
- }
- }
-
- if typ == textMarshalerType {
- return &directTextMarshalerEncoder{
- stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
- }
- }
- if typ.Implements(textMarshalerType) {
- return &textMarshalerEncoder{
- valType: typ,
- stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
- }
- }
-
- switch typ.Kind() {
- case reflect.String:
- return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
- case reflect.Bool,
- reflect.Uint8, reflect.Int8,
- reflect.Uint16, reflect.Int16,
- reflect.Uint32, reflect.Int32,
- reflect.Uint64, reflect.Int64,
- reflect.Uint, reflect.Int,
- reflect.Float32, reflect.Float64,
- reflect.Uintptr:
- typ = reflect2.DefaultTypeOfKind(typ.Kind())
- return &numericMapKeyEncoder{encoderOfType(ctx, typ)}
- default:
- if typ.Kind() == reflect.Interface {
- return &dynamicMapKeyEncoder{ctx, typ}
- }
- return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
- }
-}
-
-type mapDecoder struct {
- mapType *reflect2.UnsafeMapType
- keyType reflect2.Type
- elemType reflect2.Type
- keyDecoder ValDecoder
- elemDecoder ValDecoder
-}
-
-func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- mapType := decoder.mapType
- c := iter.nextToken()
- if c == 'n' {
- iter.skipThreeBytes('u', 'l', 'l')
- *(*unsafe.Pointer)(ptr) = nil
- mapType.UnsafeSet(ptr, mapType.UnsafeNew())
- return
- }
- if mapType.UnsafeIsNil(ptr) {
- mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0))
- }
- if c != '{' {
- iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c}))
- return
- }
- c = iter.nextToken()
- if c == '}' {
- return
- }
- iter.unreadByte()
- key := decoder.keyType.UnsafeNew()
- decoder.keyDecoder.Decode(key, iter)
- c = iter.nextToken()
- if c != ':' {
- iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
- return
- }
- elem := decoder.elemType.UnsafeNew()
- decoder.elemDecoder.Decode(elem, iter)
- decoder.mapType.UnsafeSetIndex(ptr, key, elem)
- for c = iter.nextToken(); c == ','; c = iter.nextToken() {
- key := decoder.keyType.UnsafeNew()
- decoder.keyDecoder.Decode(key, iter)
- c = iter.nextToken()
- if c != ':' {
- iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
- return
- }
- elem := decoder.elemType.UnsafeNew()
- decoder.elemDecoder.Decode(elem, iter)
- decoder.mapType.UnsafeSetIndex(ptr, key, elem)
- }
- if c != '}' {
- iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c}))
- }
-}
-
-type numericMapKeyDecoder struct {
- decoder ValDecoder
-}
-
-func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- c := iter.nextToken()
- if c != '"' {
- iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c}))
- return
- }
- decoder.decoder.Decode(ptr, iter)
- c = iter.nextToken()
- if c != '"' {
- iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c}))
- return
- }
-}
-
-type numericMapKeyEncoder struct {
- encoder ValEncoder
-}
-
-func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.writeByte('"')
- encoder.encoder.Encode(ptr, stream)
- stream.writeByte('"')
-}
-
-func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return false
-}
-
-type dynamicMapKeyEncoder struct {
- ctx *ctx
- valType reflect2.Type
-}
-
-func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- obj := encoder.valType.UnsafeIndirect(ptr)
- encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream)
-}
-
-func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- obj := encoder.valType.UnsafeIndirect(ptr)
- return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj))
-}
-
-type mapEncoder struct {
- mapType *reflect2.UnsafeMapType
- keyEncoder ValEncoder
- elemEncoder ValEncoder
-}
-
-func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- if *(*unsafe.Pointer)(ptr) == nil {
- stream.WriteNil()
- return
- }
- stream.WriteObjectStart()
- iter := encoder.mapType.UnsafeIterate(ptr)
- for i := 0; iter.HasNext(); i++ {
- if i != 0 {
- stream.WriteMore()
- }
- key, elem := iter.UnsafeNext()
- encoder.keyEncoder.Encode(key, stream)
- if stream.indention > 0 {
- stream.writeTwoBytes(byte(':'), byte(' '))
- } else {
- stream.writeByte(':')
- }
- encoder.elemEncoder.Encode(elem, stream)
- }
- stream.WriteObjectEnd()
-}
-
-func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- iter := encoder.mapType.UnsafeIterate(ptr)
- return !iter.HasNext()
-}
-
-type sortKeysMapEncoder struct {
- mapType *reflect2.UnsafeMapType
- keyEncoder ValEncoder
- elemEncoder ValEncoder
-}
-
-func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- if *(*unsafe.Pointer)(ptr) == nil {
- stream.WriteNil()
- return
- }
- stream.WriteObjectStart()
- mapIter := encoder.mapType.UnsafeIterate(ptr)
- subStream := stream.cfg.BorrowStream(nil)
- subStream.Attachment = stream.Attachment
- subIter := stream.cfg.BorrowIterator(nil)
- keyValues := encodedKeyValues{}
- for mapIter.HasNext() {
- key, elem := mapIter.UnsafeNext()
- subStreamIndex := subStream.Buffered()
- encoder.keyEncoder.Encode(key, subStream)
- if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil {
- stream.Error = subStream.Error
- }
- encodedKey := subStream.Buffer()[subStreamIndex:]
- subIter.ResetBytes(encodedKey)
- decodedKey := subIter.ReadString()
- if stream.indention > 0 {
- subStream.writeTwoBytes(byte(':'), byte(' '))
- } else {
- subStream.writeByte(':')
- }
- encoder.elemEncoder.Encode(elem, subStream)
- keyValues = append(keyValues, encodedKV{
- key: decodedKey,
- keyValue: subStream.Buffer()[subStreamIndex:],
- })
- }
- sort.Sort(keyValues)
- for i, keyValue := range keyValues {
- if i != 0 {
- stream.WriteMore()
- }
- stream.Write(keyValue.keyValue)
- }
- if subStream.Error != nil && stream.Error == nil {
- stream.Error = subStream.Error
- }
- stream.WriteObjectEnd()
- stream.cfg.ReturnStream(subStream)
- stream.cfg.ReturnIterator(subIter)
-}
-
-func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- iter := encoder.mapType.UnsafeIterate(ptr)
- return !iter.HasNext()
-}
-
-type encodedKeyValues []encodedKV
-
-type encodedKV struct {
- key string
- keyValue []byte
-}
-
-func (sv encodedKeyValues) Len() int { return len(sv) }
-func (sv encodedKeyValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
-func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key }
diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go
deleted file mode 100644
index 3e21f375..00000000
--- a/vendor/github.com/json-iterator/go/reflect_marshaler.go
+++ /dev/null
@@ -1,225 +0,0 @@
-package jsoniter
-
-import (
- "encoding"
- "encoding/json"
- "unsafe"
-
- "github.com/modern-go/reflect2"
-)
-
-var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem()
-var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem()
-var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem()
-var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem()
-
-func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder {
- ptrType := reflect2.PtrTo(typ)
- if ptrType.Implements(unmarshalerType) {
- return &referenceDecoder{
- &unmarshalerDecoder{ptrType},
- }
- }
- if ptrType.Implements(textUnmarshalerType) {
- return &referenceDecoder{
- &textUnmarshalerDecoder{ptrType},
- }
- }
- return nil
-}
-
-func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder {
- if typ == marshalerType {
- checkIsEmpty := createCheckIsEmpty(ctx, typ)
- var encoder ValEncoder = &directMarshalerEncoder{
- checkIsEmpty: checkIsEmpty,
- }
- return encoder
- }
- if typ.Implements(marshalerType) {
- checkIsEmpty := createCheckIsEmpty(ctx, typ)
- var encoder ValEncoder = &marshalerEncoder{
- valType: typ,
- checkIsEmpty: checkIsEmpty,
- }
- return encoder
- }
- ptrType := reflect2.PtrTo(typ)
- if ctx.prefix != "" && ptrType.Implements(marshalerType) {
- checkIsEmpty := createCheckIsEmpty(ctx, ptrType)
- var encoder ValEncoder = &marshalerEncoder{
- valType: ptrType,
- checkIsEmpty: checkIsEmpty,
- }
- return &referenceEncoder{encoder}
- }
- if typ == textMarshalerType {
- checkIsEmpty := createCheckIsEmpty(ctx, typ)
- var encoder ValEncoder = &directTextMarshalerEncoder{
- checkIsEmpty: checkIsEmpty,
- stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
- }
- return encoder
- }
- if typ.Implements(textMarshalerType) {
- checkIsEmpty := createCheckIsEmpty(ctx, typ)
- var encoder ValEncoder = &textMarshalerEncoder{
- valType: typ,
- stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
- checkIsEmpty: checkIsEmpty,
- }
- return encoder
- }
- // if prefix is empty, the type is the root type
- if ctx.prefix != "" && ptrType.Implements(textMarshalerType) {
- checkIsEmpty := createCheckIsEmpty(ctx, ptrType)
- var encoder ValEncoder = &textMarshalerEncoder{
- valType: ptrType,
- stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
- checkIsEmpty: checkIsEmpty,
- }
- return &referenceEncoder{encoder}
- }
- return nil
-}
-
-type marshalerEncoder struct {
- checkIsEmpty checkIsEmpty
- valType reflect2.Type
-}
-
-func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- obj := encoder.valType.UnsafeIndirect(ptr)
- if encoder.valType.IsNullable() && reflect2.IsNil(obj) {
- stream.WriteNil()
- return
- }
- marshaler := obj.(json.Marshaler)
- bytes, err := marshaler.MarshalJSON()
- if err != nil {
- stream.Error = err
- } else {
- // html escape was already done by jsoniter
- // but the extra '\n' should be trimed
- l := len(bytes)
- if l > 0 && bytes[l-1] == '\n' {
- bytes = bytes[:l-1]
- }
- stream.Write(bytes)
- }
-}
-
-func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return encoder.checkIsEmpty.IsEmpty(ptr)
-}
-
-type directMarshalerEncoder struct {
- checkIsEmpty checkIsEmpty
-}
-
-func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- marshaler := *(*json.Marshaler)(ptr)
- if marshaler == nil {
- stream.WriteNil()
- return
- }
- bytes, err := marshaler.MarshalJSON()
- if err != nil {
- stream.Error = err
- } else {
- stream.Write(bytes)
- }
-}
-
-func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return encoder.checkIsEmpty.IsEmpty(ptr)
-}
-
-type textMarshalerEncoder struct {
- valType reflect2.Type
- stringEncoder ValEncoder
- checkIsEmpty checkIsEmpty
-}
-
-func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- obj := encoder.valType.UnsafeIndirect(ptr)
- if encoder.valType.IsNullable() && reflect2.IsNil(obj) {
- stream.WriteNil()
- return
- }
- marshaler := (obj).(encoding.TextMarshaler)
- bytes, err := marshaler.MarshalText()
- if err != nil {
- stream.Error = err
- } else {
- str := string(bytes)
- encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream)
- }
-}
-
-func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return encoder.checkIsEmpty.IsEmpty(ptr)
-}
-
-type directTextMarshalerEncoder struct {
- stringEncoder ValEncoder
- checkIsEmpty checkIsEmpty
-}
-
-func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- marshaler := *(*encoding.TextMarshaler)(ptr)
- if marshaler == nil {
- stream.WriteNil()
- return
- }
- bytes, err := marshaler.MarshalText()
- if err != nil {
- stream.Error = err
- } else {
- str := string(bytes)
- encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream)
- }
-}
-
-func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return encoder.checkIsEmpty.IsEmpty(ptr)
-}
-
-type unmarshalerDecoder struct {
- valType reflect2.Type
-}
-
-func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- valType := decoder.valType
- obj := valType.UnsafeIndirect(ptr)
- unmarshaler := obj.(json.Unmarshaler)
- iter.nextToken()
- iter.unreadByte() // skip spaces
- bytes := iter.SkipAndReturnBytes()
- err := unmarshaler.UnmarshalJSON(bytes)
- if err != nil {
- iter.ReportError("unmarshalerDecoder", err.Error())
- }
-}
-
-type textUnmarshalerDecoder struct {
- valType reflect2.Type
-}
-
-func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- valType := decoder.valType
- obj := valType.UnsafeIndirect(ptr)
- if reflect2.IsNil(obj) {
- ptrType := valType.(*reflect2.UnsafePtrType)
- elemType := ptrType.Elem()
- elem := elemType.UnsafeNew()
- ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem))
- obj = valType.UnsafeIndirect(ptr)
- }
- unmarshaler := (obj).(encoding.TextUnmarshaler)
- str := iter.ReadString()
- err := unmarshaler.UnmarshalText([]byte(str))
- if err != nil {
- iter.ReportError("textUnmarshalerDecoder", err.Error())
- }
-}
diff --git a/vendor/github.com/json-iterator/go/reflect_native.go b/vendor/github.com/json-iterator/go/reflect_native.go
deleted file mode 100644
index f88722d1..00000000
--- a/vendor/github.com/json-iterator/go/reflect_native.go
+++ /dev/null
@@ -1,453 +0,0 @@
-package jsoniter
-
-import (
- "encoding/base64"
- "reflect"
- "strconv"
- "unsafe"
-
- "github.com/modern-go/reflect2"
-)
-
-const ptrSize = 32 << uintptr(^uintptr(0)>>63)
-
-func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder {
- if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 {
- sliceDecoder := decoderOfSlice(ctx, typ)
- return &base64Codec{sliceDecoder: sliceDecoder}
- }
- typeName := typ.String()
- kind := typ.Kind()
- switch kind {
- case reflect.String:
- if typeName != "string" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem())
- }
- return &stringCodec{}
- case reflect.Int:
- if typeName != "int" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem())
- }
- if strconv.IntSize == 32 {
- return &int32Codec{}
- }
- return &int64Codec{}
- case reflect.Int8:
- if typeName != "int8" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem())
- }
- return &int8Codec{}
- case reflect.Int16:
- if typeName != "int16" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem())
- }
- return &int16Codec{}
- case reflect.Int32:
- if typeName != "int32" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem())
- }
- return &int32Codec{}
- case reflect.Int64:
- if typeName != "int64" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem())
- }
- return &int64Codec{}
- case reflect.Uint:
- if typeName != "uint" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem())
- }
- if strconv.IntSize == 32 {
- return &uint32Codec{}
- }
- return &uint64Codec{}
- case reflect.Uint8:
- if typeName != "uint8" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem())
- }
- return &uint8Codec{}
- case reflect.Uint16:
- if typeName != "uint16" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem())
- }
- return &uint16Codec{}
- case reflect.Uint32:
- if typeName != "uint32" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem())
- }
- return &uint32Codec{}
- case reflect.Uintptr:
- if typeName != "uintptr" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem())
- }
- if ptrSize == 32 {
- return &uint32Codec{}
- }
- return &uint64Codec{}
- case reflect.Uint64:
- if typeName != "uint64" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem())
- }
- return &uint64Codec{}
- case reflect.Float32:
- if typeName != "float32" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem())
- }
- return &float32Codec{}
- case reflect.Float64:
- if typeName != "float64" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem())
- }
- return &float64Codec{}
- case reflect.Bool:
- if typeName != "bool" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem())
- }
- return &boolCodec{}
- }
- return nil
-}
-
-func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder {
- if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 {
- sliceDecoder := decoderOfSlice(ctx, typ)
- return &base64Codec{sliceDecoder: sliceDecoder}
- }
- typeName := typ.String()
- switch typ.Kind() {
- case reflect.String:
- if typeName != "string" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem())
- }
- return &stringCodec{}
- case reflect.Int:
- if typeName != "int" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem())
- }
- if strconv.IntSize == 32 {
- return &int32Codec{}
- }
- return &int64Codec{}
- case reflect.Int8:
- if typeName != "int8" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem())
- }
- return &int8Codec{}
- case reflect.Int16:
- if typeName != "int16" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem())
- }
- return &int16Codec{}
- case reflect.Int32:
- if typeName != "int32" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem())
- }
- return &int32Codec{}
- case reflect.Int64:
- if typeName != "int64" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem())
- }
- return &int64Codec{}
- case reflect.Uint:
- if typeName != "uint" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem())
- }
- if strconv.IntSize == 32 {
- return &uint32Codec{}
- }
- return &uint64Codec{}
- case reflect.Uint8:
- if typeName != "uint8" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem())
- }
- return &uint8Codec{}
- case reflect.Uint16:
- if typeName != "uint16" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem())
- }
- return &uint16Codec{}
- case reflect.Uint32:
- if typeName != "uint32" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem())
- }
- return &uint32Codec{}
- case reflect.Uintptr:
- if typeName != "uintptr" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem())
- }
- if ptrSize == 32 {
- return &uint32Codec{}
- }
- return &uint64Codec{}
- case reflect.Uint64:
- if typeName != "uint64" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem())
- }
- return &uint64Codec{}
- case reflect.Float32:
- if typeName != "float32" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem())
- }
- return &float32Codec{}
- case reflect.Float64:
- if typeName != "float64" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem())
- }
- return &float64Codec{}
- case reflect.Bool:
- if typeName != "bool" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem())
- }
- return &boolCodec{}
- }
- return nil
-}
-
-type stringCodec struct {
-}
-
-func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- *((*string)(ptr)) = iter.ReadString()
-}
-
-func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
- str := *((*string)(ptr))
- stream.WriteString(str)
-}
-
-func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*string)(ptr)) == ""
-}
-
-type int8Codec struct {
-}
-
-func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.ReadNil() {
- *((*int8)(ptr)) = iter.ReadInt8()
- }
-}
-
-func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteInt8(*((*int8)(ptr)))
-}
-
-func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*int8)(ptr)) == 0
-}
-
-type int16Codec struct {
-}
-
-func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.ReadNil() {
- *((*int16)(ptr)) = iter.ReadInt16()
- }
-}
-
-func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteInt16(*((*int16)(ptr)))
-}
-
-func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*int16)(ptr)) == 0
-}
-
-type int32Codec struct {
-}
-
-func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.ReadNil() {
- *((*int32)(ptr)) = iter.ReadInt32()
- }
-}
-
-func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteInt32(*((*int32)(ptr)))
-}
-
-func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*int32)(ptr)) == 0
-}
-
-type int64Codec struct {
-}
-
-func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.ReadNil() {
- *((*int64)(ptr)) = iter.ReadInt64()
- }
-}
-
-func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteInt64(*((*int64)(ptr)))
-}
-
-func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*int64)(ptr)) == 0
-}
-
-type uint8Codec struct {
-}
-
-func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.ReadNil() {
- *((*uint8)(ptr)) = iter.ReadUint8()
- }
-}
-
-func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteUint8(*((*uint8)(ptr)))
-}
-
-func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*uint8)(ptr)) == 0
-}
-
-type uint16Codec struct {
-}
-
-func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.ReadNil() {
- *((*uint16)(ptr)) = iter.ReadUint16()
- }
-}
-
-func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteUint16(*((*uint16)(ptr)))
-}
-
-func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*uint16)(ptr)) == 0
-}
-
-type uint32Codec struct {
-}
-
-func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.ReadNil() {
- *((*uint32)(ptr)) = iter.ReadUint32()
- }
-}
-
-func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteUint32(*((*uint32)(ptr)))
-}
-
-func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*uint32)(ptr)) == 0
-}
-
-type uint64Codec struct {
-}
-
-func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.ReadNil() {
- *((*uint64)(ptr)) = iter.ReadUint64()
- }
-}
-
-func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteUint64(*((*uint64)(ptr)))
-}
-
-func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*uint64)(ptr)) == 0
-}
-
-type float32Codec struct {
-}
-
-func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.ReadNil() {
- *((*float32)(ptr)) = iter.ReadFloat32()
- }
-}
-
-func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteFloat32(*((*float32)(ptr)))
-}
-
-func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*float32)(ptr)) == 0
-}
-
-type float64Codec struct {
-}
-
-func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.ReadNil() {
- *((*float64)(ptr)) = iter.ReadFloat64()
- }
-}
-
-func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteFloat64(*((*float64)(ptr)))
-}
-
-func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*float64)(ptr)) == 0
-}
-
-type boolCodec struct {
-}
-
-func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.ReadNil() {
- *((*bool)(ptr)) = iter.ReadBool()
- }
-}
-
-func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteBool(*((*bool)(ptr)))
-}
-
-func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool {
- return !(*((*bool)(ptr)))
-}
-
-type base64Codec struct {
- sliceType *reflect2.UnsafeSliceType
- sliceDecoder ValDecoder
-}
-
-func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if iter.ReadNil() {
- codec.sliceType.UnsafeSetNil(ptr)
- return
- }
- switch iter.WhatIsNext() {
- case StringValue:
- src := iter.ReadString()
- dst, err := base64.StdEncoding.DecodeString(src)
- if err != nil {
- iter.ReportError("decode base64", err.Error())
- } else {
- codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst))
- }
- case ArrayValue:
- codec.sliceDecoder.Decode(ptr, iter)
- default:
- iter.ReportError("base64Codec", "invalid input")
- }
-}
-
-func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
- if codec.sliceType.UnsafeIsNil(ptr) {
- stream.WriteNil()
- return
- }
- src := *((*[]byte)(ptr))
- encoding := base64.StdEncoding
- stream.writeByte('"')
- if len(src) != 0 {
- size := encoding.EncodedLen(len(src))
- buf := make([]byte, size)
- encoding.Encode(buf, src)
- stream.buf = append(stream.buf, buf...)
- }
- stream.writeByte('"')
-}
-
-func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool {
- return len(*((*[]byte)(ptr))) == 0
-}
diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go
deleted file mode 100644
index fa71f474..00000000
--- a/vendor/github.com/json-iterator/go/reflect_optional.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package jsoniter
-
-import (
- "github.com/modern-go/reflect2"
- "unsafe"
-)
-
-func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder {
- ptrType := typ.(*reflect2.UnsafePtrType)
- elemType := ptrType.Elem()
- decoder := decoderOfType(ctx, elemType)
- return &OptionalDecoder{elemType, decoder}
-}
-
-func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder {
- ptrType := typ.(*reflect2.UnsafePtrType)
- elemType := ptrType.Elem()
- elemEncoder := encoderOfType(ctx, elemType)
- encoder := &OptionalEncoder{elemEncoder}
- return encoder
-}
-
-type OptionalDecoder struct {
- ValueType reflect2.Type
- ValueDecoder ValDecoder
-}
-
-func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if iter.ReadNil() {
- *((*unsafe.Pointer)(ptr)) = nil
- } else {
- if *((*unsafe.Pointer)(ptr)) == nil {
- //pointer to null, we have to allocate memory to hold the value
- newPtr := decoder.ValueType.UnsafeNew()
- decoder.ValueDecoder.Decode(newPtr, iter)
- *((*unsafe.Pointer)(ptr)) = newPtr
- } else {
- //reuse existing instance
- decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
- }
- }
-}
-
-type dereferenceDecoder struct {
- // only to deference a pointer
- valueType reflect2.Type
- valueDecoder ValDecoder
-}
-
-func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if *((*unsafe.Pointer)(ptr)) == nil {
- //pointer to null, we have to allocate memory to hold the value
- newPtr := decoder.valueType.UnsafeNew()
- decoder.valueDecoder.Decode(newPtr, iter)
- *((*unsafe.Pointer)(ptr)) = newPtr
- } else {
- //reuse existing instance
- decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
- }
-}
-
-type OptionalEncoder struct {
- ValueEncoder ValEncoder
-}
-
-func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- if *((*unsafe.Pointer)(ptr)) == nil {
- stream.WriteNil()
- } else {
- encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
- }
-}
-
-func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*unsafe.Pointer)(ptr)) == nil
-}
-
-type dereferenceEncoder struct {
- ValueEncoder ValEncoder
-}
-
-func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- if *((*unsafe.Pointer)(ptr)) == nil {
- stream.WriteNil()
- } else {
- encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
- }
-}
-
-func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- dePtr := *((*unsafe.Pointer)(ptr))
- if dePtr == nil {
- return true
- }
- return encoder.ValueEncoder.IsEmpty(dePtr)
-}
-
-func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool {
- deReferenced := *((*unsafe.Pointer)(ptr))
- if deReferenced == nil {
- return true
- }
- isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil)
- if !converted {
- return false
- }
- fieldPtr := unsafe.Pointer(deReferenced)
- return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr)
-}
-
-type referenceEncoder struct {
- encoder ValEncoder
-}
-
-func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- encoder.encoder.Encode(unsafe.Pointer(&ptr), stream)
-}
-
-func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr))
-}
-
-type referenceDecoder struct {
- decoder ValDecoder
-}
-
-func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- decoder.decoder.Decode(unsafe.Pointer(&ptr), iter)
-}
diff --git a/vendor/github.com/json-iterator/go/reflect_slice.go b/vendor/github.com/json-iterator/go/reflect_slice.go
deleted file mode 100644
index 9441d79d..00000000
--- a/vendor/github.com/json-iterator/go/reflect_slice.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package jsoniter
-
-import (
- "fmt"
- "github.com/modern-go/reflect2"
- "io"
- "unsafe"
-)
-
-func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder {
- sliceType := typ.(*reflect2.UnsafeSliceType)
- decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem())
- return &sliceDecoder{sliceType, decoder}
-}
-
-func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder {
- sliceType := typ.(*reflect2.UnsafeSliceType)
- encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem())
- return &sliceEncoder{sliceType, encoder}
-}
-
-type sliceEncoder struct {
- sliceType *reflect2.UnsafeSliceType
- elemEncoder ValEncoder
-}
-
-func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- if encoder.sliceType.UnsafeIsNil(ptr) {
- stream.WriteNil()
- return
- }
- length := encoder.sliceType.UnsafeLengthOf(ptr)
- if length == 0 {
- stream.WriteEmptyArray()
- return
- }
- stream.WriteArrayStart()
- encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream)
- for i := 1; i < length; i++ {
- stream.WriteMore()
- elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i)
- encoder.elemEncoder.Encode(elemPtr, stream)
- }
- stream.WriteArrayEnd()
- if stream.Error != nil && stream.Error != io.EOF {
- stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error())
- }
-}
-
-func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return encoder.sliceType.UnsafeLengthOf(ptr) == 0
-}
-
-type sliceDecoder struct {
- sliceType *reflect2.UnsafeSliceType
- elemDecoder ValDecoder
-}
-
-func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- decoder.doDecode(ptr, iter)
- if iter.Error != nil && iter.Error != io.EOF {
- iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error())
- }
-}
-
-func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
- c := iter.nextToken()
- sliceType := decoder.sliceType
- if c == 'n' {
- iter.skipThreeBytes('u', 'l', 'l')
- sliceType.UnsafeSetNil(ptr)
- return
- }
- if c != '[' {
- iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c}))
- return
- }
- c = iter.nextToken()
- if c == ']' {
- sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0))
- return
- }
- iter.unreadByte()
- sliceType.UnsafeGrow(ptr, 1)
- elemPtr := sliceType.UnsafeGetIndex(ptr, 0)
- decoder.elemDecoder.Decode(elemPtr, iter)
- length := 1
- for c = iter.nextToken(); c == ','; c = iter.nextToken() {
- idx := length
- length += 1
- sliceType.UnsafeGrow(ptr, length)
- elemPtr = sliceType.UnsafeGetIndex(ptr, idx)
- decoder.elemDecoder.Decode(elemPtr, iter)
- }
- if c != ']' {
- iter.ReportError("decode slice", "expect ], but found "+string([]byte{c}))
- return
- }
-}
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
deleted file mode 100644
index 92ae912d..00000000
--- a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
+++ /dev/null
@@ -1,1097 +0,0 @@
-package jsoniter
-
-import (
- "fmt"
- "io"
- "strings"
- "unsafe"
-
- "github.com/modern-go/reflect2"
-)
-
-func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder {
- bindings := map[string]*Binding{}
- structDescriptor := describeStruct(ctx, typ)
- for _, binding := range structDescriptor.Fields {
- for _, fromName := range binding.FromNames {
- old := bindings[fromName]
- if old == nil {
- bindings[fromName] = binding
- continue
- }
- ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding)
- if ignoreOld {
- delete(bindings, fromName)
- }
- if !ignoreNew {
- bindings[fromName] = binding
- }
- }
- }
- fields := map[string]*structFieldDecoder{}
- for k, binding := range bindings {
- fields[k] = binding.Decoder.(*structFieldDecoder)
- }
-
- if !ctx.caseSensitive() {
- for k, binding := range bindings {
- if _, found := fields[strings.ToLower(k)]; !found {
- fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder)
- }
- }
- }
-
- return createStructDecoder(ctx, typ, fields)
-}
-
-func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder {
- if ctx.disallowUnknownFields {
- return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true}
- }
- knownHash := map[int64]struct{}{
- 0: {},
- }
-
- switch len(fields) {
- case 0:
- return &skipObjectDecoder{typ}
- case 1:
- for fieldName, fieldDecoder := range fields {
- fieldHash := calcHash(fieldName, ctx.caseSensitive())
- _, known := knownHash[fieldHash]
- if known {
- return &generalStructDecoder{typ, fields, false}
- }
- knownHash[fieldHash] = struct{}{}
- return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder}
- }
- case 2:
- var fieldHash1 int64
- var fieldHash2 int64
- var fieldDecoder1 *structFieldDecoder
- var fieldDecoder2 *structFieldDecoder
- for fieldName, fieldDecoder := range fields {
- fieldHash := calcHash(fieldName, ctx.caseSensitive())
- _, known := knownHash[fieldHash]
- if known {
- return &generalStructDecoder{typ, fields, false}
- }
- knownHash[fieldHash] = struct{}{}
- if fieldHash1 == 0 {
- fieldHash1 = fieldHash
- fieldDecoder1 = fieldDecoder
- } else {
- fieldHash2 = fieldHash
- fieldDecoder2 = fieldDecoder
- }
- }
- return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2}
- case 3:
- var fieldName1 int64
- var fieldName2 int64
- var fieldName3 int64
- var fieldDecoder1 *structFieldDecoder
- var fieldDecoder2 *structFieldDecoder
- var fieldDecoder3 *structFieldDecoder
- for fieldName, fieldDecoder := range fields {
- fieldHash := calcHash(fieldName, ctx.caseSensitive())
- _, known := knownHash[fieldHash]
- if known {
- return &generalStructDecoder{typ, fields, false}
- }
- knownHash[fieldHash] = struct{}{}
- if fieldName1 == 0 {
- fieldName1 = fieldHash
- fieldDecoder1 = fieldDecoder
- } else if fieldName2 == 0 {
- fieldName2 = fieldHash
- fieldDecoder2 = fieldDecoder
- } else {
- fieldName3 = fieldHash
- fieldDecoder3 = fieldDecoder
- }
- }
- return &threeFieldsStructDecoder{typ,
- fieldName1, fieldDecoder1,
- fieldName2, fieldDecoder2,
- fieldName3, fieldDecoder3}
- case 4:
- var fieldName1 int64
- var fieldName2 int64
- var fieldName3 int64
- var fieldName4 int64
- var fieldDecoder1 *structFieldDecoder
- var fieldDecoder2 *structFieldDecoder
- var fieldDecoder3 *structFieldDecoder
- var fieldDecoder4 *structFieldDecoder
- for fieldName, fieldDecoder := range fields {
- fieldHash := calcHash(fieldName, ctx.caseSensitive())
- _, known := knownHash[fieldHash]
- if known {
- return &generalStructDecoder{typ, fields, false}
- }
- knownHash[fieldHash] = struct{}{}
- if fieldName1 == 0 {
- fieldName1 = fieldHash
- fieldDecoder1 = fieldDecoder
- } else if fieldName2 == 0 {
- fieldName2 = fieldHash
- fieldDecoder2 = fieldDecoder
- } else if fieldName3 == 0 {
- fieldName3 = fieldHash
- fieldDecoder3 = fieldDecoder
- } else {
- fieldName4 = fieldHash
- fieldDecoder4 = fieldDecoder
- }
- }
- return &fourFieldsStructDecoder{typ,
- fieldName1, fieldDecoder1,
- fieldName2, fieldDecoder2,
- fieldName3, fieldDecoder3,
- fieldName4, fieldDecoder4}
- case 5:
- var fieldName1 int64
- var fieldName2 int64
- var fieldName3 int64
- var fieldName4 int64
- var fieldName5 int64
- var fieldDecoder1 *structFieldDecoder
- var fieldDecoder2 *structFieldDecoder
- var fieldDecoder3 *structFieldDecoder
- var fieldDecoder4 *structFieldDecoder
- var fieldDecoder5 *structFieldDecoder
- for fieldName, fieldDecoder := range fields {
- fieldHash := calcHash(fieldName, ctx.caseSensitive())
- _, known := knownHash[fieldHash]
- if known {
- return &generalStructDecoder{typ, fields, false}
- }
- knownHash[fieldHash] = struct{}{}
- if fieldName1 == 0 {
- fieldName1 = fieldHash
- fieldDecoder1 = fieldDecoder
- } else if fieldName2 == 0 {
- fieldName2 = fieldHash
- fieldDecoder2 = fieldDecoder
- } else if fieldName3 == 0 {
- fieldName3 = fieldHash
- fieldDecoder3 = fieldDecoder
- } else if fieldName4 == 0 {
- fieldName4 = fieldHash
- fieldDecoder4 = fieldDecoder
- } else {
- fieldName5 = fieldHash
- fieldDecoder5 = fieldDecoder
- }
- }
- return &fiveFieldsStructDecoder{typ,
- fieldName1, fieldDecoder1,
- fieldName2, fieldDecoder2,
- fieldName3, fieldDecoder3,
- fieldName4, fieldDecoder4,
- fieldName5, fieldDecoder5}
- case 6:
- var fieldName1 int64
- var fieldName2 int64
- var fieldName3 int64
- var fieldName4 int64
- var fieldName5 int64
- var fieldName6 int64
- var fieldDecoder1 *structFieldDecoder
- var fieldDecoder2 *structFieldDecoder
- var fieldDecoder3 *structFieldDecoder
- var fieldDecoder4 *structFieldDecoder
- var fieldDecoder5 *structFieldDecoder
- var fieldDecoder6 *structFieldDecoder
- for fieldName, fieldDecoder := range fields {
- fieldHash := calcHash(fieldName, ctx.caseSensitive())
- _, known := knownHash[fieldHash]
- if known {
- return &generalStructDecoder{typ, fields, false}
- }
- knownHash[fieldHash] = struct{}{}
- if fieldName1 == 0 {
- fieldName1 = fieldHash
- fieldDecoder1 = fieldDecoder
- } else if fieldName2 == 0 {
- fieldName2 = fieldHash
- fieldDecoder2 = fieldDecoder
- } else if fieldName3 == 0 {
- fieldName3 = fieldHash
- fieldDecoder3 = fieldDecoder
- } else if fieldName4 == 0 {
- fieldName4 = fieldHash
- fieldDecoder4 = fieldDecoder
- } else if fieldName5 == 0 {
- fieldName5 = fieldHash
- fieldDecoder5 = fieldDecoder
- } else {
- fieldName6 = fieldHash
- fieldDecoder6 = fieldDecoder
- }
- }
- return &sixFieldsStructDecoder{typ,
- fieldName1, fieldDecoder1,
- fieldName2, fieldDecoder2,
- fieldName3, fieldDecoder3,
- fieldName4, fieldDecoder4,
- fieldName5, fieldDecoder5,
- fieldName6, fieldDecoder6}
- case 7:
- var fieldName1 int64
- var fieldName2 int64
- var fieldName3 int64
- var fieldName4 int64
- var fieldName5 int64
- var fieldName6 int64
- var fieldName7 int64
- var fieldDecoder1 *structFieldDecoder
- var fieldDecoder2 *structFieldDecoder
- var fieldDecoder3 *structFieldDecoder
- var fieldDecoder4 *structFieldDecoder
- var fieldDecoder5 *structFieldDecoder
- var fieldDecoder6 *structFieldDecoder
- var fieldDecoder7 *structFieldDecoder
- for fieldName, fieldDecoder := range fields {
- fieldHash := calcHash(fieldName, ctx.caseSensitive())
- _, known := knownHash[fieldHash]
- if known {
- return &generalStructDecoder{typ, fields, false}
- }
- knownHash[fieldHash] = struct{}{}
- if fieldName1 == 0 {
- fieldName1 = fieldHash
- fieldDecoder1 = fieldDecoder
- } else if fieldName2 == 0 {
- fieldName2 = fieldHash
- fieldDecoder2 = fieldDecoder
- } else if fieldName3 == 0 {
- fieldName3 = fieldHash
- fieldDecoder3 = fieldDecoder
- } else if fieldName4 == 0 {
- fieldName4 = fieldHash
- fieldDecoder4 = fieldDecoder
- } else if fieldName5 == 0 {
- fieldName5 = fieldHash
- fieldDecoder5 = fieldDecoder
- } else if fieldName6 == 0 {
- fieldName6 = fieldHash
- fieldDecoder6 = fieldDecoder
- } else {
- fieldName7 = fieldHash
- fieldDecoder7 = fieldDecoder
- }
- }
- return &sevenFieldsStructDecoder{typ,
- fieldName1, fieldDecoder1,
- fieldName2, fieldDecoder2,
- fieldName3, fieldDecoder3,
- fieldName4, fieldDecoder4,
- fieldName5, fieldDecoder5,
- fieldName6, fieldDecoder6,
- fieldName7, fieldDecoder7}
- case 8:
- var fieldName1 int64
- var fieldName2 int64
- var fieldName3 int64
- var fieldName4 int64
- var fieldName5 int64
- var fieldName6 int64
- var fieldName7 int64
- var fieldName8 int64
- var fieldDecoder1 *structFieldDecoder
- var fieldDecoder2 *structFieldDecoder
- var fieldDecoder3 *structFieldDecoder
- var fieldDecoder4 *structFieldDecoder
- var fieldDecoder5 *structFieldDecoder
- var fieldDecoder6 *structFieldDecoder
- var fieldDecoder7 *structFieldDecoder
- var fieldDecoder8 *structFieldDecoder
- for fieldName, fieldDecoder := range fields {
- fieldHash := calcHash(fieldName, ctx.caseSensitive())
- _, known := knownHash[fieldHash]
- if known {
- return &generalStructDecoder{typ, fields, false}
- }
- knownHash[fieldHash] = struct{}{}
- if fieldName1 == 0 {
- fieldName1 = fieldHash
- fieldDecoder1 = fieldDecoder
- } else if fieldName2 == 0 {
- fieldName2 = fieldHash
- fieldDecoder2 = fieldDecoder
- } else if fieldName3 == 0 {
- fieldName3 = fieldHash
- fieldDecoder3 = fieldDecoder
- } else if fieldName4 == 0 {
- fieldName4 = fieldHash
- fieldDecoder4 = fieldDecoder
- } else if fieldName5 == 0 {
- fieldName5 = fieldHash
- fieldDecoder5 = fieldDecoder
- } else if fieldName6 == 0 {
- fieldName6 = fieldHash
- fieldDecoder6 = fieldDecoder
- } else if fieldName7 == 0 {
- fieldName7 = fieldHash
- fieldDecoder7 = fieldDecoder
- } else {
- fieldName8 = fieldHash
- fieldDecoder8 = fieldDecoder
- }
- }
- return &eightFieldsStructDecoder{typ,
- fieldName1, fieldDecoder1,
- fieldName2, fieldDecoder2,
- fieldName3, fieldDecoder3,
- fieldName4, fieldDecoder4,
- fieldName5, fieldDecoder5,
- fieldName6, fieldDecoder6,
- fieldName7, fieldDecoder7,
- fieldName8, fieldDecoder8}
- case 9:
- var fieldName1 int64
- var fieldName2 int64
- var fieldName3 int64
- var fieldName4 int64
- var fieldName5 int64
- var fieldName6 int64
- var fieldName7 int64
- var fieldName8 int64
- var fieldName9 int64
- var fieldDecoder1 *structFieldDecoder
- var fieldDecoder2 *structFieldDecoder
- var fieldDecoder3 *structFieldDecoder
- var fieldDecoder4 *structFieldDecoder
- var fieldDecoder5 *structFieldDecoder
- var fieldDecoder6 *structFieldDecoder
- var fieldDecoder7 *structFieldDecoder
- var fieldDecoder8 *structFieldDecoder
- var fieldDecoder9 *structFieldDecoder
- for fieldName, fieldDecoder := range fields {
- fieldHash := calcHash(fieldName, ctx.caseSensitive())
- _, known := knownHash[fieldHash]
- if known {
- return &generalStructDecoder{typ, fields, false}
- }
- knownHash[fieldHash] = struct{}{}
- if fieldName1 == 0 {
- fieldName1 = fieldHash
- fieldDecoder1 = fieldDecoder
- } else if fieldName2 == 0 {
- fieldName2 = fieldHash
- fieldDecoder2 = fieldDecoder
- } else if fieldName3 == 0 {
- fieldName3 = fieldHash
- fieldDecoder3 = fieldDecoder
- } else if fieldName4 == 0 {
- fieldName4 = fieldHash
- fieldDecoder4 = fieldDecoder
- } else if fieldName5 == 0 {
- fieldName5 = fieldHash
- fieldDecoder5 = fieldDecoder
- } else if fieldName6 == 0 {
- fieldName6 = fieldHash
- fieldDecoder6 = fieldDecoder
- } else if fieldName7 == 0 {
- fieldName7 = fieldHash
- fieldDecoder7 = fieldDecoder
- } else if fieldName8 == 0 {
- fieldName8 = fieldHash
- fieldDecoder8 = fieldDecoder
- } else {
- fieldName9 = fieldHash
- fieldDecoder9 = fieldDecoder
- }
- }
- return &nineFieldsStructDecoder{typ,
- fieldName1, fieldDecoder1,
- fieldName2, fieldDecoder2,
- fieldName3, fieldDecoder3,
- fieldName4, fieldDecoder4,
- fieldName5, fieldDecoder5,
- fieldName6, fieldDecoder6,
- fieldName7, fieldDecoder7,
- fieldName8, fieldDecoder8,
- fieldName9, fieldDecoder9}
- case 10:
- var fieldName1 int64
- var fieldName2 int64
- var fieldName3 int64
- var fieldName4 int64
- var fieldName5 int64
- var fieldName6 int64
- var fieldName7 int64
- var fieldName8 int64
- var fieldName9 int64
- var fieldName10 int64
- var fieldDecoder1 *structFieldDecoder
- var fieldDecoder2 *structFieldDecoder
- var fieldDecoder3 *structFieldDecoder
- var fieldDecoder4 *structFieldDecoder
- var fieldDecoder5 *structFieldDecoder
- var fieldDecoder6 *structFieldDecoder
- var fieldDecoder7 *structFieldDecoder
- var fieldDecoder8 *structFieldDecoder
- var fieldDecoder9 *structFieldDecoder
- var fieldDecoder10 *structFieldDecoder
- for fieldName, fieldDecoder := range fields {
- fieldHash := calcHash(fieldName, ctx.caseSensitive())
- _, known := knownHash[fieldHash]
- if known {
- return &generalStructDecoder{typ, fields, false}
- }
- knownHash[fieldHash] = struct{}{}
- if fieldName1 == 0 {
- fieldName1 = fieldHash
- fieldDecoder1 = fieldDecoder
- } else if fieldName2 == 0 {
- fieldName2 = fieldHash
- fieldDecoder2 = fieldDecoder
- } else if fieldName3 == 0 {
- fieldName3 = fieldHash
- fieldDecoder3 = fieldDecoder
- } else if fieldName4 == 0 {
- fieldName4 = fieldHash
- fieldDecoder4 = fieldDecoder
- } else if fieldName5 == 0 {
- fieldName5 = fieldHash
- fieldDecoder5 = fieldDecoder
- } else if fieldName6 == 0 {
- fieldName6 = fieldHash
- fieldDecoder6 = fieldDecoder
- } else if fieldName7 == 0 {
- fieldName7 = fieldHash
- fieldDecoder7 = fieldDecoder
- } else if fieldName8 == 0 {
- fieldName8 = fieldHash
- fieldDecoder8 = fieldDecoder
- } else if fieldName9 == 0 {
- fieldName9 = fieldHash
- fieldDecoder9 = fieldDecoder
- } else {
- fieldName10 = fieldHash
- fieldDecoder10 = fieldDecoder
- }
- }
- return &tenFieldsStructDecoder{typ,
- fieldName1, fieldDecoder1,
- fieldName2, fieldDecoder2,
- fieldName3, fieldDecoder3,
- fieldName4, fieldDecoder4,
- fieldName5, fieldDecoder5,
- fieldName6, fieldDecoder6,
- fieldName7, fieldDecoder7,
- fieldName8, fieldDecoder8,
- fieldName9, fieldDecoder9,
- fieldName10, fieldDecoder10}
- }
- return &generalStructDecoder{typ, fields, false}
-}
-
-type generalStructDecoder struct {
- typ reflect2.Type
- fields map[string]*structFieldDecoder
- disallowUnknownFields bool
-}
-
-func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.readObjectStart() {
- return
- }
- if !iter.incrementDepth() {
- return
- }
- var c byte
- for c = ','; c == ','; c = iter.nextToken() {
- decoder.decodeOneField(ptr, iter)
- }
- if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
- iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
- }
- if c != '}' {
- iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c}))
- }
- iter.decrementDepth()
-}
-
-func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) {
- var field string
- var fieldDecoder *structFieldDecoder
- if iter.cfg.objectFieldMustBeSimpleString {
- fieldBytes := iter.ReadStringAsSlice()
- field = *(*string)(unsafe.Pointer(&fieldBytes))
- fieldDecoder = decoder.fields[field]
- if fieldDecoder == nil && !iter.cfg.caseSensitive {
- fieldDecoder = decoder.fields[strings.ToLower(field)]
- }
- } else {
- field = iter.ReadString()
- fieldDecoder = decoder.fields[field]
- if fieldDecoder == nil && !iter.cfg.caseSensitive {
- fieldDecoder = decoder.fields[strings.ToLower(field)]
- }
- }
- if fieldDecoder == nil {
- if decoder.disallowUnknownFields {
- msg := "found unknown field: " + field
- iter.ReportError("ReadObject", msg)
- }
- c := iter.nextToken()
- if c != ':' {
- iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
- }
- iter.Skip()
- return
- }
- c := iter.nextToken()
- if c != ':' {
- iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
- }
- fieldDecoder.Decode(ptr, iter)
-}
-
-type skipObjectDecoder struct {
- typ reflect2.Type
-}
-
-func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- valueType := iter.WhatIsNext()
- if valueType != ObjectValue && valueType != NilValue {
- iter.ReportError("skipObjectDecoder", "expect object or null")
- return
- }
- iter.Skip()
-}
-
-type oneFieldStructDecoder struct {
- typ reflect2.Type
- fieldHash int64
- fieldDecoder *structFieldDecoder
-}
-
-func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.readObjectStart() {
- return
- }
- if !iter.incrementDepth() {
- return
- }
- for {
- if iter.readFieldHash() == decoder.fieldHash {
- decoder.fieldDecoder.Decode(ptr, iter)
- } else {
- iter.Skip()
- }
- if iter.isObjectEnd() {
- break
- }
- }
- if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
- iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
- }
- iter.decrementDepth()
-}
-
-type twoFieldsStructDecoder struct {
- typ reflect2.Type
- fieldHash1 int64
- fieldDecoder1 *structFieldDecoder
- fieldHash2 int64
- fieldDecoder2 *structFieldDecoder
-}
-
-func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.readObjectStart() {
- return
- }
- if !iter.incrementDepth() {
- return
- }
- for {
- switch iter.readFieldHash() {
- case decoder.fieldHash1:
- decoder.fieldDecoder1.Decode(ptr, iter)
- case decoder.fieldHash2:
- decoder.fieldDecoder2.Decode(ptr, iter)
- default:
- iter.Skip()
- }
- if iter.isObjectEnd() {
- break
- }
- }
- if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
- iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
- }
- iter.decrementDepth()
-}
-
-type threeFieldsStructDecoder struct {
- typ reflect2.Type
- fieldHash1 int64
- fieldDecoder1 *structFieldDecoder
- fieldHash2 int64
- fieldDecoder2 *structFieldDecoder
- fieldHash3 int64
- fieldDecoder3 *structFieldDecoder
-}
-
-func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.readObjectStart() {
- return
- }
- if !iter.incrementDepth() {
- return
- }
- for {
- switch iter.readFieldHash() {
- case decoder.fieldHash1:
- decoder.fieldDecoder1.Decode(ptr, iter)
- case decoder.fieldHash2:
- decoder.fieldDecoder2.Decode(ptr, iter)
- case decoder.fieldHash3:
- decoder.fieldDecoder3.Decode(ptr, iter)
- default:
- iter.Skip()
- }
- if iter.isObjectEnd() {
- break
- }
- }
- if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
- iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
- }
- iter.decrementDepth()
-}
-
-type fourFieldsStructDecoder struct {
- typ reflect2.Type
- fieldHash1 int64
- fieldDecoder1 *structFieldDecoder
- fieldHash2 int64
- fieldDecoder2 *structFieldDecoder
- fieldHash3 int64
- fieldDecoder3 *structFieldDecoder
- fieldHash4 int64
- fieldDecoder4 *structFieldDecoder
-}
-
-func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.readObjectStart() {
- return
- }
- if !iter.incrementDepth() {
- return
- }
- for {
- switch iter.readFieldHash() {
- case decoder.fieldHash1:
- decoder.fieldDecoder1.Decode(ptr, iter)
- case decoder.fieldHash2:
- decoder.fieldDecoder2.Decode(ptr, iter)
- case decoder.fieldHash3:
- decoder.fieldDecoder3.Decode(ptr, iter)
- case decoder.fieldHash4:
- decoder.fieldDecoder4.Decode(ptr, iter)
- default:
- iter.Skip()
- }
- if iter.isObjectEnd() {
- break
- }
- }
- if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
- iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
- }
- iter.decrementDepth()
-}
-
-type fiveFieldsStructDecoder struct {
- typ reflect2.Type
- fieldHash1 int64
- fieldDecoder1 *structFieldDecoder
- fieldHash2 int64
- fieldDecoder2 *structFieldDecoder
- fieldHash3 int64
- fieldDecoder3 *structFieldDecoder
- fieldHash4 int64
- fieldDecoder4 *structFieldDecoder
- fieldHash5 int64
- fieldDecoder5 *structFieldDecoder
-}
-
-func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.readObjectStart() {
- return
- }
- if !iter.incrementDepth() {
- return
- }
- for {
- switch iter.readFieldHash() {
- case decoder.fieldHash1:
- decoder.fieldDecoder1.Decode(ptr, iter)
- case decoder.fieldHash2:
- decoder.fieldDecoder2.Decode(ptr, iter)
- case decoder.fieldHash3:
- decoder.fieldDecoder3.Decode(ptr, iter)
- case decoder.fieldHash4:
- decoder.fieldDecoder4.Decode(ptr, iter)
- case decoder.fieldHash5:
- decoder.fieldDecoder5.Decode(ptr, iter)
- default:
- iter.Skip()
- }
- if iter.isObjectEnd() {
- break
- }
- }
- if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
- iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
- }
- iter.decrementDepth()
-}
-
-type sixFieldsStructDecoder struct {
- typ reflect2.Type
- fieldHash1 int64
- fieldDecoder1 *structFieldDecoder
- fieldHash2 int64
- fieldDecoder2 *structFieldDecoder
- fieldHash3 int64
- fieldDecoder3 *structFieldDecoder
- fieldHash4 int64
- fieldDecoder4 *structFieldDecoder
- fieldHash5 int64
- fieldDecoder5 *structFieldDecoder
- fieldHash6 int64
- fieldDecoder6 *structFieldDecoder
-}
-
-func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.readObjectStart() {
- return
- }
- if !iter.incrementDepth() {
- return
- }
- for {
- switch iter.readFieldHash() {
- case decoder.fieldHash1:
- decoder.fieldDecoder1.Decode(ptr, iter)
- case decoder.fieldHash2:
- decoder.fieldDecoder2.Decode(ptr, iter)
- case decoder.fieldHash3:
- decoder.fieldDecoder3.Decode(ptr, iter)
- case decoder.fieldHash4:
- decoder.fieldDecoder4.Decode(ptr, iter)
- case decoder.fieldHash5:
- decoder.fieldDecoder5.Decode(ptr, iter)
- case decoder.fieldHash6:
- decoder.fieldDecoder6.Decode(ptr, iter)
- default:
- iter.Skip()
- }
- if iter.isObjectEnd() {
- break
- }
- }
- if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
- iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
- }
- iter.decrementDepth()
-}
-
-type sevenFieldsStructDecoder struct {
- typ reflect2.Type
- fieldHash1 int64
- fieldDecoder1 *structFieldDecoder
- fieldHash2 int64
- fieldDecoder2 *structFieldDecoder
- fieldHash3 int64
- fieldDecoder3 *structFieldDecoder
- fieldHash4 int64
- fieldDecoder4 *structFieldDecoder
- fieldHash5 int64
- fieldDecoder5 *structFieldDecoder
- fieldHash6 int64
- fieldDecoder6 *structFieldDecoder
- fieldHash7 int64
- fieldDecoder7 *structFieldDecoder
-}
-
-func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.readObjectStart() {
- return
- }
- if !iter.incrementDepth() {
- return
- }
- for {
- switch iter.readFieldHash() {
- case decoder.fieldHash1:
- decoder.fieldDecoder1.Decode(ptr, iter)
- case decoder.fieldHash2:
- decoder.fieldDecoder2.Decode(ptr, iter)
- case decoder.fieldHash3:
- decoder.fieldDecoder3.Decode(ptr, iter)
- case decoder.fieldHash4:
- decoder.fieldDecoder4.Decode(ptr, iter)
- case decoder.fieldHash5:
- decoder.fieldDecoder5.Decode(ptr, iter)
- case decoder.fieldHash6:
- decoder.fieldDecoder6.Decode(ptr, iter)
- case decoder.fieldHash7:
- decoder.fieldDecoder7.Decode(ptr, iter)
- default:
- iter.Skip()
- }
- if iter.isObjectEnd() {
- break
- }
- }
- if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
- iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
- }
- iter.decrementDepth()
-}
-
-type eightFieldsStructDecoder struct {
- typ reflect2.Type
- fieldHash1 int64
- fieldDecoder1 *structFieldDecoder
- fieldHash2 int64
- fieldDecoder2 *structFieldDecoder
- fieldHash3 int64
- fieldDecoder3 *structFieldDecoder
- fieldHash4 int64
- fieldDecoder4 *structFieldDecoder
- fieldHash5 int64
- fieldDecoder5 *structFieldDecoder
- fieldHash6 int64
- fieldDecoder6 *structFieldDecoder
- fieldHash7 int64
- fieldDecoder7 *structFieldDecoder
- fieldHash8 int64
- fieldDecoder8 *structFieldDecoder
-}
-
-func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.readObjectStart() {
- return
- }
- if !iter.incrementDepth() {
- return
- }
- for {
- switch iter.readFieldHash() {
- case decoder.fieldHash1:
- decoder.fieldDecoder1.Decode(ptr, iter)
- case decoder.fieldHash2:
- decoder.fieldDecoder2.Decode(ptr, iter)
- case decoder.fieldHash3:
- decoder.fieldDecoder3.Decode(ptr, iter)
- case decoder.fieldHash4:
- decoder.fieldDecoder4.Decode(ptr, iter)
- case decoder.fieldHash5:
- decoder.fieldDecoder5.Decode(ptr, iter)
- case decoder.fieldHash6:
- decoder.fieldDecoder6.Decode(ptr, iter)
- case decoder.fieldHash7:
- decoder.fieldDecoder7.Decode(ptr, iter)
- case decoder.fieldHash8:
- decoder.fieldDecoder8.Decode(ptr, iter)
- default:
- iter.Skip()
- }
- if iter.isObjectEnd() {
- break
- }
- }
- if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
- iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
- }
- iter.decrementDepth()
-}
-
-type nineFieldsStructDecoder struct {
- typ reflect2.Type
- fieldHash1 int64
- fieldDecoder1 *structFieldDecoder
- fieldHash2 int64
- fieldDecoder2 *structFieldDecoder
- fieldHash3 int64
- fieldDecoder3 *structFieldDecoder
- fieldHash4 int64
- fieldDecoder4 *structFieldDecoder
- fieldHash5 int64
- fieldDecoder5 *structFieldDecoder
- fieldHash6 int64
- fieldDecoder6 *structFieldDecoder
- fieldHash7 int64
- fieldDecoder7 *structFieldDecoder
- fieldHash8 int64
- fieldDecoder8 *structFieldDecoder
- fieldHash9 int64
- fieldDecoder9 *structFieldDecoder
-}
-
-func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.readObjectStart() {
- return
- }
- if !iter.incrementDepth() {
- return
- }
- for {
- switch iter.readFieldHash() {
- case decoder.fieldHash1:
- decoder.fieldDecoder1.Decode(ptr, iter)
- case decoder.fieldHash2:
- decoder.fieldDecoder2.Decode(ptr, iter)
- case decoder.fieldHash3:
- decoder.fieldDecoder3.Decode(ptr, iter)
- case decoder.fieldHash4:
- decoder.fieldDecoder4.Decode(ptr, iter)
- case decoder.fieldHash5:
- decoder.fieldDecoder5.Decode(ptr, iter)
- case decoder.fieldHash6:
- decoder.fieldDecoder6.Decode(ptr, iter)
- case decoder.fieldHash7:
- decoder.fieldDecoder7.Decode(ptr, iter)
- case decoder.fieldHash8:
- decoder.fieldDecoder8.Decode(ptr, iter)
- case decoder.fieldHash9:
- decoder.fieldDecoder9.Decode(ptr, iter)
- default:
- iter.Skip()
- }
- if iter.isObjectEnd() {
- break
- }
- }
- if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
- iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
- }
- iter.decrementDepth()
-}
-
-type tenFieldsStructDecoder struct {
- typ reflect2.Type
- fieldHash1 int64
- fieldDecoder1 *structFieldDecoder
- fieldHash2 int64
- fieldDecoder2 *structFieldDecoder
- fieldHash3 int64
- fieldDecoder3 *structFieldDecoder
- fieldHash4 int64
- fieldDecoder4 *structFieldDecoder
- fieldHash5 int64
- fieldDecoder5 *structFieldDecoder
- fieldHash6 int64
- fieldDecoder6 *structFieldDecoder
- fieldHash7 int64
- fieldDecoder7 *structFieldDecoder
- fieldHash8 int64
- fieldDecoder8 *structFieldDecoder
- fieldHash9 int64
- fieldDecoder9 *structFieldDecoder
- fieldHash10 int64
- fieldDecoder10 *structFieldDecoder
-}
-
-func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.readObjectStart() {
- return
- }
- if !iter.incrementDepth() {
- return
- }
- for {
- switch iter.readFieldHash() {
- case decoder.fieldHash1:
- decoder.fieldDecoder1.Decode(ptr, iter)
- case decoder.fieldHash2:
- decoder.fieldDecoder2.Decode(ptr, iter)
- case decoder.fieldHash3:
- decoder.fieldDecoder3.Decode(ptr, iter)
- case decoder.fieldHash4:
- decoder.fieldDecoder4.Decode(ptr, iter)
- case decoder.fieldHash5:
- decoder.fieldDecoder5.Decode(ptr, iter)
- case decoder.fieldHash6:
- decoder.fieldDecoder6.Decode(ptr, iter)
- case decoder.fieldHash7:
- decoder.fieldDecoder7.Decode(ptr, iter)
- case decoder.fieldHash8:
- decoder.fieldDecoder8.Decode(ptr, iter)
- case decoder.fieldHash9:
- decoder.fieldDecoder9.Decode(ptr, iter)
- case decoder.fieldHash10:
- decoder.fieldDecoder10.Decode(ptr, iter)
- default:
- iter.Skip()
- }
- if iter.isObjectEnd() {
- break
- }
- }
- if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
- iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
- }
- iter.decrementDepth()
-}
-
-type structFieldDecoder struct {
- field reflect2.StructField
- fieldDecoder ValDecoder
-}
-
-func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- fieldPtr := decoder.field.UnsafeGet(ptr)
- decoder.fieldDecoder.Decode(fieldPtr, iter)
- if iter.Error != nil && iter.Error != io.EOF {
- iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error())
- }
-}
-
-type stringModeStringDecoder struct {
- elemDecoder ValDecoder
- cfg *frozenConfig
-}
-
-func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- decoder.elemDecoder.Decode(ptr, iter)
- str := *((*string)(ptr))
- tempIter := decoder.cfg.BorrowIterator([]byte(str))
- defer decoder.cfg.ReturnIterator(tempIter)
- *((*string)(ptr)) = tempIter.ReadString()
-}
-
-type stringModeNumberDecoder struct {
- elemDecoder ValDecoder
-}
-
-func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if iter.WhatIsNext() == NilValue {
- decoder.elemDecoder.Decode(ptr, iter)
- return
- }
-
- c := iter.nextToken()
- if c != '"' {
- iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
- return
- }
- decoder.elemDecoder.Decode(ptr, iter)
- if iter.Error != nil {
- return
- }
- c = iter.readByte()
- if c != '"' {
- iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
- return
- }
-}
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
deleted file mode 100644
index 152e3ef5..00000000
--- a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
+++ /dev/null
@@ -1,211 +0,0 @@
-package jsoniter
-
-import (
- "fmt"
- "github.com/modern-go/reflect2"
- "io"
- "reflect"
- "unsafe"
-)
-
-func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder {
- type bindingTo struct {
- binding *Binding
- toName string
- ignored bool
- }
- orderedBindings := []*bindingTo{}
- structDescriptor := describeStruct(ctx, typ)
- for _, binding := range structDescriptor.Fields {
- for _, toName := range binding.ToNames {
- new := &bindingTo{
- binding: binding,
- toName: toName,
- }
- for _, old := range orderedBindings {
- if old.toName != toName {
- continue
- }
- old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding)
- }
- orderedBindings = append(orderedBindings, new)
- }
- }
- if len(orderedBindings) == 0 {
- return &emptyStructEncoder{}
- }
- finalOrderedFields := []structFieldTo{}
- for _, bindingTo := range orderedBindings {
- if !bindingTo.ignored {
- finalOrderedFields = append(finalOrderedFields, structFieldTo{
- encoder: bindingTo.binding.Encoder.(*structFieldEncoder),
- toName: bindingTo.toName,
- })
- }
- }
- return &structEncoder{typ, finalOrderedFields}
-}
-
-func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty {
- encoder := createEncoderOfNative(ctx, typ)
- if encoder != nil {
- return encoder
- }
- kind := typ.Kind()
- switch kind {
- case reflect.Interface:
- return &dynamicEncoder{typ}
- case reflect.Struct:
- return &structEncoder{typ: typ}
- case reflect.Array:
- return &arrayEncoder{}
- case reflect.Slice:
- return &sliceEncoder{}
- case reflect.Map:
- return encoderOfMap(ctx, typ)
- case reflect.Ptr:
- return &OptionalEncoder{}
- default:
- return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)}
- }
-}
-
-func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) {
- newTagged := new.Field.Tag().Get(cfg.getTagKey()) != ""
- oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != ""
- if newTagged {
- if oldTagged {
- if len(old.levels) > len(new.levels) {
- return true, false
- } else if len(new.levels) > len(old.levels) {
- return false, true
- } else {
- return true, true
- }
- } else {
- return true, false
- }
- } else {
- if oldTagged {
- return true, false
- }
- if len(old.levels) > len(new.levels) {
- return true, false
- } else if len(new.levels) > len(old.levels) {
- return false, true
- } else {
- return true, true
- }
- }
-}
-
-type structFieldEncoder struct {
- field reflect2.StructField
- fieldEncoder ValEncoder
- omitempty bool
-}
-
-func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- fieldPtr := encoder.field.UnsafeGet(ptr)
- encoder.fieldEncoder.Encode(fieldPtr, stream)
- if stream.Error != nil && stream.Error != io.EOF {
- stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error())
- }
-}
-
-func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- fieldPtr := encoder.field.UnsafeGet(ptr)
- return encoder.fieldEncoder.IsEmpty(fieldPtr)
-}
-
-func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool {
- isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil)
- if !converted {
- return false
- }
- fieldPtr := encoder.field.UnsafeGet(ptr)
- return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr)
-}
-
-type IsEmbeddedPtrNil interface {
- IsEmbeddedPtrNil(ptr unsafe.Pointer) bool
-}
-
-type structEncoder struct {
- typ reflect2.Type
- fields []structFieldTo
-}
-
-type structFieldTo struct {
- encoder *structFieldEncoder
- toName string
-}
-
-func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteObjectStart()
- isNotFirst := false
- for _, field := range encoder.fields {
- if field.encoder.omitempty && field.encoder.IsEmpty(ptr) {
- continue
- }
- if field.encoder.IsEmbeddedPtrNil(ptr) {
- continue
- }
- if isNotFirst {
- stream.WriteMore()
- }
- stream.WriteObjectField(field.toName)
- field.encoder.Encode(ptr, stream)
- isNotFirst = true
- }
- stream.WriteObjectEnd()
- if stream.Error != nil && stream.Error != io.EOF {
- stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error())
- }
-}
-
-func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return false
-}
-
-type emptyStructEncoder struct {
-}
-
-func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteEmptyObject()
-}
-
-func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return false
-}
-
-type stringModeNumberEncoder struct {
- elemEncoder ValEncoder
-}
-
-func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.writeByte('"')
- encoder.elemEncoder.Encode(ptr, stream)
- stream.writeByte('"')
-}
-
-func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return encoder.elemEncoder.IsEmpty(ptr)
-}
-
-type stringModeStringEncoder struct {
- elemEncoder ValEncoder
- cfg *frozenConfig
-}
-
-func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- tempStream := encoder.cfg.BorrowStream(nil)
- tempStream.Attachment = stream.Attachment
- defer encoder.cfg.ReturnStream(tempStream)
- encoder.elemEncoder.Encode(ptr, tempStream)
- stream.WriteString(string(tempStream.Buffer()))
-}
-
-func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return encoder.elemEncoder.IsEmpty(ptr)
-}
diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go
deleted file mode 100644
index 23d8a3ad..00000000
--- a/vendor/github.com/json-iterator/go/stream.go
+++ /dev/null
@@ -1,210 +0,0 @@
-package jsoniter
-
-import (
- "io"
-)
-
-// stream is a io.Writer like object, with JSON specific write functions.
-// Error is not returned as return value, but stored as Error member on this stream instance.
-type Stream struct {
- cfg *frozenConfig
- out io.Writer
- buf []byte
- Error error
- indention int
- Attachment interface{} // open for customized encoder
-}
-
-// NewStream create new stream instance.
-// cfg can be jsoniter.ConfigDefault.
-// out can be nil if write to internal buffer.
-// bufSize is the initial size for the internal buffer in bytes.
-func NewStream(cfg API, out io.Writer, bufSize int) *Stream {
- return &Stream{
- cfg: cfg.(*frozenConfig),
- out: out,
- buf: make([]byte, 0, bufSize),
- Error: nil,
- indention: 0,
- }
-}
-
-// Pool returns a pool can provide more stream with same configuration
-func (stream *Stream) Pool() StreamPool {
- return stream.cfg
-}
-
-// Reset reuse this stream instance by assign a new writer
-func (stream *Stream) Reset(out io.Writer) {
- stream.out = out
- stream.buf = stream.buf[:0]
-}
-
-// Available returns how many bytes are unused in the buffer.
-func (stream *Stream) Available() int {
- return cap(stream.buf) - len(stream.buf)
-}
-
-// Buffered returns the number of bytes that have been written into the current buffer.
-func (stream *Stream) Buffered() int {
- return len(stream.buf)
-}
-
-// Buffer if writer is nil, use this method to take the result
-func (stream *Stream) Buffer() []byte {
- return stream.buf
-}
-
-// SetBuffer allows to append to the internal buffer directly
-func (stream *Stream) SetBuffer(buf []byte) {
- stream.buf = buf
-}
-
-// Write writes the contents of p into the buffer.
-// It returns the number of bytes written.
-// If nn < len(p), it also returns an error explaining
-// why the write is short.
-func (stream *Stream) Write(p []byte) (nn int, err error) {
- stream.buf = append(stream.buf, p...)
- if stream.out != nil {
- nn, err = stream.out.Write(stream.buf)
- stream.buf = stream.buf[nn:]
- return
- }
- return len(p), nil
-}
-
-// WriteByte writes a single byte.
-func (stream *Stream) writeByte(c byte) {
- stream.buf = append(stream.buf, c)
-}
-
-func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) {
- stream.buf = append(stream.buf, c1, c2)
-}
-
-func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) {
- stream.buf = append(stream.buf, c1, c2, c3)
-}
-
-func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) {
- stream.buf = append(stream.buf, c1, c2, c3, c4)
-}
-
-func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) {
- stream.buf = append(stream.buf, c1, c2, c3, c4, c5)
-}
-
-// Flush writes any buffered data to the underlying io.Writer.
-func (stream *Stream) Flush() error {
- if stream.out == nil {
- return nil
- }
- if stream.Error != nil {
- return stream.Error
- }
- _, err := stream.out.Write(stream.buf)
- if err != nil {
- if stream.Error == nil {
- stream.Error = err
- }
- return err
- }
- stream.buf = stream.buf[:0]
- return nil
-}
-
-// WriteRaw write string out without quotes, just like []byte
-func (stream *Stream) WriteRaw(s string) {
- stream.buf = append(stream.buf, s...)
-}
-
-// WriteNil write null to stream
-func (stream *Stream) WriteNil() {
- stream.writeFourBytes('n', 'u', 'l', 'l')
-}
-
-// WriteTrue write true to stream
-func (stream *Stream) WriteTrue() {
- stream.writeFourBytes('t', 'r', 'u', 'e')
-}
-
-// WriteFalse write false to stream
-func (stream *Stream) WriteFalse() {
- stream.writeFiveBytes('f', 'a', 'l', 's', 'e')
-}
-
-// WriteBool write true or false into stream
-func (stream *Stream) WriteBool(val bool) {
- if val {
- stream.WriteTrue()
- } else {
- stream.WriteFalse()
- }
-}
-
-// WriteObjectStart write { with possible indention
-func (stream *Stream) WriteObjectStart() {
- stream.indention += stream.cfg.indentionStep
- stream.writeByte('{')
- stream.writeIndention(0)
-}
-
-// WriteObjectField write "field": with possible indention
-func (stream *Stream) WriteObjectField(field string) {
- stream.WriteString(field)
- if stream.indention > 0 {
- stream.writeTwoBytes(':', ' ')
- } else {
- stream.writeByte(':')
- }
-}
-
-// WriteObjectEnd write } with possible indention
-func (stream *Stream) WriteObjectEnd() {
- stream.writeIndention(stream.cfg.indentionStep)
- stream.indention -= stream.cfg.indentionStep
- stream.writeByte('}')
-}
-
-// WriteEmptyObject write {}
-func (stream *Stream) WriteEmptyObject() {
- stream.writeByte('{')
- stream.writeByte('}')
-}
-
-// WriteMore write , with possible indention
-func (stream *Stream) WriteMore() {
- stream.writeByte(',')
- stream.writeIndention(0)
-}
-
-// WriteArrayStart write [ with possible indention
-func (stream *Stream) WriteArrayStart() {
- stream.indention += stream.cfg.indentionStep
- stream.writeByte('[')
- stream.writeIndention(0)
-}
-
-// WriteEmptyArray write []
-func (stream *Stream) WriteEmptyArray() {
- stream.writeTwoBytes('[', ']')
-}
-
-// WriteArrayEnd write ] with possible indention
-func (stream *Stream) WriteArrayEnd() {
- stream.writeIndention(stream.cfg.indentionStep)
- stream.indention -= stream.cfg.indentionStep
- stream.writeByte(']')
-}
-
-func (stream *Stream) writeIndention(delta int) {
- if stream.indention == 0 {
- return
- }
- stream.writeByte('\n')
- toWrite := stream.indention - delta
- for i := 0; i < toWrite; i++ {
- stream.buf = append(stream.buf, ' ')
- }
-}
diff --git a/vendor/github.com/json-iterator/go/stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go
deleted file mode 100644
index 826aa594..00000000
--- a/vendor/github.com/json-iterator/go/stream_float.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package jsoniter
-
-import (
- "fmt"
- "math"
- "strconv"
-)
-
-var pow10 []uint64
-
-func init() {
- pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000}
-}
-
-// WriteFloat32 write float32 to stream
-func (stream *Stream) WriteFloat32(val float32) {
- if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) {
- stream.Error = fmt.Errorf("unsupported value: %f", val)
- return
- }
- abs := math.Abs(float64(val))
- fmt := byte('f')
- // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
- if abs != 0 {
- if float32(abs) < 1e-6 || float32(abs) >= 1e21 {
- fmt = 'e'
- }
- }
- stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32)
-}
-
-// WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster
-func (stream *Stream) WriteFloat32Lossy(val float32) {
- if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) {
- stream.Error = fmt.Errorf("unsupported value: %f", val)
- return
- }
- if val < 0 {
- stream.writeByte('-')
- val = -val
- }
- if val > 0x4ffffff {
- stream.WriteFloat32(val)
- return
- }
- precision := 6
- exp := uint64(1000000) // 6
- lval := uint64(float64(val)*float64(exp) + 0.5)
- stream.WriteUint64(lval / exp)
- fval := lval % exp
- if fval == 0 {
- return
- }
- stream.writeByte('.')
- for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
- stream.writeByte('0')
- }
- stream.WriteUint64(fval)
- for stream.buf[len(stream.buf)-1] == '0' {
- stream.buf = stream.buf[:len(stream.buf)-1]
- }
-}
-
-// WriteFloat64 write float64 to stream
-func (stream *Stream) WriteFloat64(val float64) {
- if math.IsInf(val, 0) || math.IsNaN(val) {
- stream.Error = fmt.Errorf("unsupported value: %f", val)
- return
- }
- abs := math.Abs(val)
- fmt := byte('f')
- // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
- if abs != 0 {
- if abs < 1e-6 || abs >= 1e21 {
- fmt = 'e'
- }
- }
- stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64)
-}
-
-// WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster
-func (stream *Stream) WriteFloat64Lossy(val float64) {
- if math.IsInf(val, 0) || math.IsNaN(val) {
- stream.Error = fmt.Errorf("unsupported value: %f", val)
- return
- }
- if val < 0 {
- stream.writeByte('-')
- val = -val
- }
- if val > 0x4ffffff {
- stream.WriteFloat64(val)
- return
- }
- precision := 6
- exp := uint64(1000000) // 6
- lval := uint64(val*float64(exp) + 0.5)
- stream.WriteUint64(lval / exp)
- fval := lval % exp
- if fval == 0 {
- return
- }
- stream.writeByte('.')
- for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
- stream.writeByte('0')
- }
- stream.WriteUint64(fval)
- for stream.buf[len(stream.buf)-1] == '0' {
- stream.buf = stream.buf[:len(stream.buf)-1]
- }
-}
diff --git a/vendor/github.com/json-iterator/go/stream_int.go b/vendor/github.com/json-iterator/go/stream_int.go
deleted file mode 100644
index d1059ee4..00000000
--- a/vendor/github.com/json-iterator/go/stream_int.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package jsoniter
-
-var digits []uint32
-
-func init() {
- digits = make([]uint32, 1000)
- for i := uint32(0); i < 1000; i++ {
- digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
- if i < 10 {
- digits[i] += 2 << 24
- } else if i < 100 {
- digits[i] += 1 << 24
- }
- }
-}
-
-func writeFirstBuf(space []byte, v uint32) []byte {
- start := v >> 24
- if start == 0 {
- space = append(space, byte(v>>16), byte(v>>8))
- } else if start == 1 {
- space = append(space, byte(v>>8))
- }
- space = append(space, byte(v))
- return space
-}
-
-func writeBuf(buf []byte, v uint32) []byte {
- return append(buf, byte(v>>16), byte(v>>8), byte(v))
-}
-
-// WriteUint8 write uint8 to stream
-func (stream *Stream) WriteUint8(val uint8) {
- stream.buf = writeFirstBuf(stream.buf, digits[val])
-}
-
-// WriteInt8 write int8 to stream
-func (stream *Stream) WriteInt8(nval int8) {
- var val uint8
- if nval < 0 {
- val = uint8(-nval)
- stream.buf = append(stream.buf, '-')
- } else {
- val = uint8(nval)
- }
- stream.buf = writeFirstBuf(stream.buf, digits[val])
-}
-
-// WriteUint16 write uint16 to stream
-func (stream *Stream) WriteUint16(val uint16) {
- q1 := val / 1000
- if q1 == 0 {
- stream.buf = writeFirstBuf(stream.buf, digits[val])
- return
- }
- r1 := val - q1*1000
- stream.buf = writeFirstBuf(stream.buf, digits[q1])
- stream.buf = writeBuf(stream.buf, digits[r1])
- return
-}
-
-// WriteInt16 write int16 to stream
-func (stream *Stream) WriteInt16(nval int16) {
- var val uint16
- if nval < 0 {
- val = uint16(-nval)
- stream.buf = append(stream.buf, '-')
- } else {
- val = uint16(nval)
- }
- stream.WriteUint16(val)
-}
-
-// WriteUint32 write uint32 to stream
-func (stream *Stream) WriteUint32(val uint32) {
- q1 := val / 1000
- if q1 == 0 {
- stream.buf = writeFirstBuf(stream.buf, digits[val])
- return
- }
- r1 := val - q1*1000
- q2 := q1 / 1000
- if q2 == 0 {
- stream.buf = writeFirstBuf(stream.buf, digits[q1])
- stream.buf = writeBuf(stream.buf, digits[r1])
- return
- }
- r2 := q1 - q2*1000
- q3 := q2 / 1000
- if q3 == 0 {
- stream.buf = writeFirstBuf(stream.buf, digits[q2])
- } else {
- r3 := q2 - q3*1000
- stream.buf = append(stream.buf, byte(q3+'0'))
- stream.buf = writeBuf(stream.buf, digits[r3])
- }
- stream.buf = writeBuf(stream.buf, digits[r2])
- stream.buf = writeBuf(stream.buf, digits[r1])
-}
-
-// WriteInt32 write int32 to stream
-func (stream *Stream) WriteInt32(nval int32) {
- var val uint32
- if nval < 0 {
- val = uint32(-nval)
- stream.buf = append(stream.buf, '-')
- } else {
- val = uint32(nval)
- }
- stream.WriteUint32(val)
-}
-
-// WriteUint64 write uint64 to stream
-func (stream *Stream) WriteUint64(val uint64) {
- q1 := val / 1000
- if q1 == 0 {
- stream.buf = writeFirstBuf(stream.buf, digits[val])
- return
- }
- r1 := val - q1*1000
- q2 := q1 / 1000
- if q2 == 0 {
- stream.buf = writeFirstBuf(stream.buf, digits[q1])
- stream.buf = writeBuf(stream.buf, digits[r1])
- return
- }
- r2 := q1 - q2*1000
- q3 := q2 / 1000
- if q3 == 0 {
- stream.buf = writeFirstBuf(stream.buf, digits[q2])
- stream.buf = writeBuf(stream.buf, digits[r2])
- stream.buf = writeBuf(stream.buf, digits[r1])
- return
- }
- r3 := q2 - q3*1000
- q4 := q3 / 1000
- if q4 == 0 {
- stream.buf = writeFirstBuf(stream.buf, digits[q3])
- stream.buf = writeBuf(stream.buf, digits[r3])
- stream.buf = writeBuf(stream.buf, digits[r2])
- stream.buf = writeBuf(stream.buf, digits[r1])
- return
- }
- r4 := q3 - q4*1000
- q5 := q4 / 1000
- if q5 == 0 {
- stream.buf = writeFirstBuf(stream.buf, digits[q4])
- stream.buf = writeBuf(stream.buf, digits[r4])
- stream.buf = writeBuf(stream.buf, digits[r3])
- stream.buf = writeBuf(stream.buf, digits[r2])
- stream.buf = writeBuf(stream.buf, digits[r1])
- return
- }
- r5 := q4 - q5*1000
- q6 := q5 / 1000
- if q6 == 0 {
- stream.buf = writeFirstBuf(stream.buf, digits[q5])
- } else {
- stream.buf = writeFirstBuf(stream.buf, digits[q6])
- r6 := q5 - q6*1000
- stream.buf = writeBuf(stream.buf, digits[r6])
- }
- stream.buf = writeBuf(stream.buf, digits[r5])
- stream.buf = writeBuf(stream.buf, digits[r4])
- stream.buf = writeBuf(stream.buf, digits[r3])
- stream.buf = writeBuf(stream.buf, digits[r2])
- stream.buf = writeBuf(stream.buf, digits[r1])
-}
-
-// WriteInt64 write int64 to stream
-func (stream *Stream) WriteInt64(nval int64) {
- var val uint64
- if nval < 0 {
- val = uint64(-nval)
- stream.buf = append(stream.buf, '-')
- } else {
- val = uint64(nval)
- }
- stream.WriteUint64(val)
-}
-
-// WriteInt write int to stream
-func (stream *Stream) WriteInt(val int) {
- stream.WriteInt64(int64(val))
-}
-
-// WriteUint write uint to stream
-func (stream *Stream) WriteUint(val uint) {
- stream.WriteUint64(uint64(val))
-}
diff --git a/vendor/github.com/json-iterator/go/stream_str.go b/vendor/github.com/json-iterator/go/stream_str.go
deleted file mode 100644
index 54c2ba0b..00000000
--- a/vendor/github.com/json-iterator/go/stream_str.go
+++ /dev/null
@@ -1,372 +0,0 @@
-package jsoniter
-
-import (
- "unicode/utf8"
-)
-
-// htmlSafeSet holds the value true if the ASCII character with the given
-// array position can be safely represented inside a JSON string, embedded
-// inside of HTML