From 0884f89431cd26bcc9674b3b7ab628b090f5881e Mon Sep 17 00:00:00 2001
From: tobi <31960611+tsmethurst@users.noreply.github.com>
Date: Tue, 7 Dec 2021 13:31:39 +0100
Subject: [PATCH] Implement Cobra CLI tooling, Viper config tooling (#336)
* start pulling out + replacing urfave and config
* replace many many instances of config
* move more stuff => viper
* properly remove urfave
* move some flags to root command
* add testrig commands to root
* alias config file keys
* start adding cli parsing tests
* reorder viper init
* remove config path alias
* fmt
* change config file keys to non-nested
* we're more or less in business now
* tidy up the common func
* go fmt
* get tests passing again
* add note about the cliparsing tests
* reorganize
* update docs with changes
* structure cmd dir better
* rename + move some files around
* fix dangling comma
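As an overall illustration of the pattern this change introduces (a sketch, not code taken from the patch itself): a flag is defined on a Cobra command via pflag and then bound into viper, so that CLI flags, GTS_* environment variables, and config file keys all collate into a single value. The `host` key and usage text below are examples only, not necessarily the exact names used in the codebase.

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

func main() {
	rootCmd := &cobra.Command{
		Use:   "gotosocial",
		Short: "GoToSocial - an ActivityPub social media server",
		RunE: func(cmd *cobra.Command, args []string) error {
			// viper resolves the final value, whichever source it came from.
			fmt.Println("host:", viper.GetString("host"))
			return nil
		},
	}

	// Define the flag via pflag (through cobra)...
	rootCmd.PersistentFlags().String("host", "localhost", "hostname that this server is reachable at")

	// ...then bind all defined flags into viper so flag values, env vars,
	// and config file entries are merged under the same (non-nested) keys.
	if err := viper.BindPFlags(rootCmd.PersistentFlags()); err != nil {
		panic(err)
	}

	if err := rootCmd.Execute(); err != nil {
		panic(err)
	}
}
```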
---
.drone.yml | 3 +-
CONTRIBUTING.md | 6 +
README.md | 4 +-
cmd/gotosocial/accountsflags.go | 47 -
.../gotosocial/action}/action.go | 6 +-
.../action}/admin/account/account.go | 65 +-
.../gotosocial/action}/admin/trans/export.go | 11 +-
.../gotosocial/action}/admin/trans/import.go | 11 +-
.../debug/config/config.go} | 28 +-
.../gotosocial/action}/server/server.go | 103 +-
.../gotosocial/action}/testrig/testrig.go | 77 +-
cmd/gotosocial/admin.go | 169 ++
cmd/gotosocial/admincommands.go | 184 --
cmd/gotosocial/common.go | 64 +
cmd/gotosocial/databaseflags.go | 77 -
cmd/gotosocial/{flags.go => debug.go} | 43 +-
cmd/gotosocial/flag/admin.go | 62 +
cmd/gotosocial/flag/global.go | 45 +
cmd/gotosocial/flag/server.go | 111 +
cmd/gotosocial/flag/usage.go | 80 +
cmd/gotosocial/generalflags.go | 84 -
cmd/gotosocial/letsencryptflags.go | 53 -
cmd/gotosocial/main.go | 49 +-
cmd/gotosocial/mediaflags.go | 53 -
cmd/gotosocial/oidcflags.go | 71 -
cmd/gotosocial/runaction.go | 82 -
.../{templateflags.go => server.go} | 34 +-
cmd/gotosocial/servercommands.go | 42 -
cmd/gotosocial/smtpflags.go | 59 -
cmd/gotosocial/statusesflags.go | 59 -
cmd/gotosocial/storageflags.go | 59 -
.../{testrigcommands.go => testrig.go} | 32 +-
docs/admin/cli.md | 158 +-
docs/configuration/accounts.md | 25 +-
docs/configuration/database.md | 81 +-
docs/configuration/general.md | 10 +-
docs/configuration/index.md | 36 +-
docs/configuration/letsencrypt.md | 55 +-
docs/configuration/media.md | 33 +-
docs/configuration/oidc.md | 83 +-
docs/configuration/smtp.md | 53 +-
docs/configuration/statuses.md | 62 +-
docs/configuration/storage.md | 59 +-
docs/configuration/template.md | 22 -
docs/configuration/web.md | 21 +
docs/installation_guide/binary.md | 8 +-
docs/installation_guide/index.md | 2 +-
example/config.yaml | 469 ++--
go.mod | 17 +-
go.sum | 268 +-
internal/api/client/account/account.go | 5 +-
internal/api/client/account/account_test.go | 11 +-
internal/api/client/account/accountcreate.go | 14 +-
internal/api/client/admin/admin.go | 5 +-
internal/api/client/app/app.go | 5 +-
internal/api/client/auth/auth.go | 5 +-
internal/api/client/auth/auth_test.go | 132 +-
internal/api/client/auth/callback.go | 5 +-
internal/api/client/blocks/blocks.go | 5 +-
internal/api/client/emoji/emoji.go | 5 +-
internal/api/client/favourites/favourites.go | 5 +-
internal/api/client/fileserver/fileserver.go | 15 +-
.../api/client/fileserver/servefile_test.go | 8 +-
internal/api/client/filter/filter.go | 5 +-
.../api/client/followrequest/followrequest.go | 5 +-
.../followrequest/followrequest_test.go | 11 +-
internal/api/client/instance/instance.go | 5 +-
internal/api/client/instance/instanceget.go | 9 +-
internal/api/client/list/list.go | 5 +-
internal/api/client/media/media.go | 5 +-
internal/api/client/media/mediacreate.go | 24 +-
internal/api/client/media/mediacreate_test.go | 8 +-
internal/api/client/media/mediaupdate.go | 15 +-
.../api/client/notification/notification.go | 5 +-
internal/api/client/search/search.go | 5 +-
internal/api/client/status/status.go | 5 +-
internal/api/client/status/status_test.go | 8 +-
internal/api/client/status/statuscreate.go | 35 +-
internal/api/client/streaming/streaming.go | 5 +-
internal/api/client/timeline/timeline.go | 5 +-
internal/api/client/user/user.go | 5 +-
internal/api/client/user/user_test.go | 8 +-
internal/api/s2s/nodeinfo/nodeinfo.go | 5 +-
internal/api/s2s/user/inboxpost_test.go | 8 +-
internal/api/s2s/user/outboxget_test.go | 6 +-
internal/api/s2s/user/repliesget_test.go | 6 +-
internal/api/s2s/user/user.go | 5 +-
internal/api/s2s/user/user_test.go | 11 +-
internal/api/s2s/user/userget_test.go | 2 +-
internal/api/s2s/webfinger/webfinger.go | 5 +-
internal/api/s2s/webfinger/webfinger_test.go | 11 +-
internal/api/s2s/webfinger/webfingerget.go | 15 +-
.../api/s2s/webfinger/webfingerget_test.go | 30 +-
internal/api/security/security.go | 5 +-
internal/config/config.go | 622 -----
internal/config/db.go | 49 -
internal/config/default.go | 289 ---
internal/config/defaults.go | 87 +
internal/config/{oidc.go => file.go} | 26 +-
internal/config/keys.go | 175 ++
internal/config/letsencrypt.go | 13 -
internal/config/media.go | 31 -
internal/config/smtp.go | 33 -
internal/config/statuses.go | 33 -
internal/config/storage.go | 36 -
internal/config/template.go | 27 -
internal/config/values.go | 90 +
internal/config/{accounts.go => viper.go} | 29 +-
internal/db/bundb/account.go | 9 +-
internal/db/bundb/admin.go | 25 +-
internal/db/bundb/basic.go | 4 +-
internal/db/bundb/bundb.go | 149 +-
internal/db/bundb/bundb_test.go | 8 +-
internal/db/bundb/domain.go | 4 +-
internal/db/bundb/instance.go | 14 +-
internal/db/bundb/media.go | 4 +-
internal/db/bundb/mention.go | 6 +-
internal/db/bundb/notification.go | 6 +-
internal/db/bundb/relationship.go | 4 +-
internal/db/bundb/session.go | 4 +-
internal/db/bundb/status.go | 6 +-
internal/db/bundb/timeline.go | 4 +-
internal/email/noopsender.go | 6 +-
internal/email/sender.go | 20 +-
internal/federation/authenticate.go | 5 +-
.../federation/dereferencing/dereferencer.go | 5 +-
.../dereferencing/dereferencer_test.go | 9 +-
internal/federation/dereferencing/thread.go | 14 +-
internal/federation/federatingdb/db.go | 7 +-
.../federatingdb/federatingdb_test.go | 6 +-
internal/federation/federatingdb/owns.go | 7 +-
.../federation/federatingdb/reject_test.go | 2 +-
internal/federation/federatingdb/update.go | 5 +-
internal/federation/federatingdb/util.go | 9 +-
internal/federation/federator.go | 7 +-
internal/federation/federator_test.go | 14 +-
internal/gotosocial/gotosocial.go | 5 +-
internal/media/handler.go | 18 +-
internal/media/processicon.go | 9 +-
internal/media/processimage.go | 9 +-
internal/oauth/clientstore_test.go | 2 +
internal/oidc/idp.go | 47 +-
internal/processing/account/account.go | 7 +-
internal/processing/account/account_test.go | 9 +-
internal/processing/account/create.go | 10 +-
internal/processing/account/createblock.go | 2 +-
internal/processing/account/createfollow.go | 2 +-
internal/processing/account/update.go | 15 +-
internal/processing/admin/admin.go | 5 +-
internal/processing/blocks.go | 13 +-
internal/processing/federation/federation.go | 5 +-
internal/processing/federation/getnodeinfo.go | 14 +-
.../processing/federation/getwebfinger.go | 6 +-
internal/processing/instance.go | 13 +-
internal/processing/media/media.go | 5 +-
internal/processing/processor.go | 16 +-
internal/processing/processor_test.go | 9 +-
internal/processing/search.go | 5 +-
internal/processing/status/create.go | 2 +-
internal/processing/status/fave.go | 2 +-
internal/processing/status/status.go | 7 +-
internal/processing/status/status_test.go | 32 +-
internal/processing/status/util_test.go | 30 -
.../processing/streaming/streaming_test.go | 4 +-
internal/processing/timeline.go | 16 +-
internal/processing/user/emailconfirm.go | 7 +-
internal/processing/user/user.go | 5 +-
internal/processing/user/user_test.go | 7 +-
internal/router/cors.go | 3 +-
internal/router/router.go | 42 +-
internal/router/session.go | 20 +-
internal/router/session_test.go | 50 +-
internal/router/template.go | 6 +-
internal/text/formatter.go | 9 +-
internal/text/formatter_test.go | 10 +-
internal/timeline/get_test.go | 5 +-
internal/timeline/index_test.go | 5 +-
internal/timeline/manager.go | 5 +-
internal/timeline/manager_test.go | 5 +-
internal/timeline/timeline_test.go | 6 +-
internal/trans/trans_test.go | 3 +-
internal/transport/controller.go | 13 +-
internal/typeutils/converter.go | 5 +-
internal/typeutils/converter_test.go | 9 +-
internal/typeutils/internal.go | 2 +-
internal/typeutils/internaltoas.go | 5 +-
internal/typeutils/internaltofrontend.go | 35 +-
internal/typeutils/wrap.go | 2 +-
internal/util/uri.go | 27 +-
internal/visibility/filter_test.go | 6 +-
internal/web/base.go | 17 +-
internal/web/confirmemail.go | 5 +-
internal/web/thread.go | 8 +-
scripts/build.sh | 2 +-
test/cliparsing.sh | 117 +
test/test.json | 64 +
test/test.yaml | 399 +++
test/test2.yaml | 2 +
testrig/config.go | 102 +-
testrig/db.go | 20 +-
testrig/email.go | 10 +-
testrig/federatingdb.go | 2 +-
testrig/federator.go | 2 +-
testrig/mediahandler.go | 2 +-
testrig/processor.go | 2 +-
testrig/router.go | 2 +-
testrig/timelinemanager.go | 2 +-
testrig/transportcontroller.go | 2 +-
testrig/typeconverter.go | 2 +-
.../cpuguy83/go-md2man/v2/md2man/md2man.go | 14 -
.../cpuguy83/go-md2man/v2/md2man/roff.go | 336 ---
.../fsnotify/fsnotify/.editorconfig | 12 +
.../fsnotify/fsnotify/.gitattributes | 1 +
.../github.com/fsnotify/fsnotify/.gitignore | 6 +
vendor/github.com/fsnotify/fsnotify/.mailmap | 2 +
vendor/github.com/fsnotify/fsnotify/AUTHORS | 62 +
.../github.com/fsnotify/fsnotify/CHANGELOG.md | 339 +++
.../fsnotify/fsnotify/CONTRIBUTING.md | 77 +
vendor/github.com/fsnotify/fsnotify/LICENSE | 28 +
vendor/github.com/fsnotify/fsnotify/README.md | 130 +
vendor/github.com/fsnotify/fsnotify/fen.go | 38 +
.../github.com/fsnotify/fsnotify/fsnotify.go | 69 +
.../github.com/fsnotify/fsnotify/inotify.go | 338 +++
.../fsnotify/fsnotify/inotify_poller.go | 188 ++
vendor/github.com/fsnotify/fsnotify/kqueue.go | 522 ++++
.../fsnotify/fsnotify/open_mode_bsd.go | 12 +
.../fsnotify/fsnotify/open_mode_darwin.go | 13 +
.../github.com/fsnotify/fsnotify/windows.go | 562 +++++
vendor/github.com/hashicorp/hcl/.gitignore | 9 +
vendor/github.com/hashicorp/hcl/.travis.yml | 13 +
vendor/github.com/hashicorp/hcl/LICENSE | 354 +++
vendor/github.com/hashicorp/hcl/Makefile | 18 +
vendor/github.com/hashicorp/hcl/README.md | 125 +
vendor/github.com/hashicorp/hcl/appveyor.yml | 19 +
vendor/github.com/hashicorp/hcl/decoder.go | 729 ++++++
vendor/github.com/hashicorp/hcl/hcl.go | 11 +
.../github.com/hashicorp/hcl/hcl/ast/ast.go | 219 ++
.../github.com/hashicorp/hcl/hcl/ast/walk.go | 52 +
.../hashicorp/hcl/hcl/parser/error.go | 17 +
.../hashicorp/hcl/hcl/parser/parser.go | 532 ++++
.../hashicorp/hcl/hcl/printer/nodes.go | 789 ++++++
.../hashicorp/hcl/hcl/printer/printer.go | 66 +
.../hashicorp/hcl/hcl/scanner/scanner.go | 652 +++++
.../hashicorp/hcl/hcl/strconv/quote.go | 241 ++
.../hashicorp/hcl/hcl/token/position.go | 46 +
.../hashicorp/hcl/hcl/token/token.go | 219 ++
.../hashicorp/hcl/json/parser/flatten.go | 117 +
.../hashicorp/hcl/json/parser/parser.go | 313 +++
.../hashicorp/hcl/json/scanner/scanner.go | 451 ++++
.../hashicorp/hcl/json/token/position.go | 46 +
.../hashicorp/hcl/json/token/token.go | 118 +
vendor/github.com/hashicorp/hcl/lex.go | 38 +
vendor/github.com/hashicorp/hcl/parse.go | 39 +
.../inconshreveable/mousetrap/LICENSE | 13 +
.../inconshreveable/mousetrap/README.md | 23 +
.../inconshreveable/mousetrap/trap_others.go | 15 +
.../inconshreveable/mousetrap/trap_windows.go | 98 +
.../mousetrap/trap_windows_1.4.go | 46 +
.../magiconair/properties/.gitignore | 6 +
.../magiconair/properties/.travis.yml | 17 +
.../magiconair/properties/CHANGELOG.md | 160 ++
.../magiconair/properties/LICENSE.md | 24 +
.../magiconair/properties/README.md | 128 +
.../magiconair/properties/decode.go | 289 +++
.../github.com/magiconair/properties/doc.go | 156 ++
.../magiconair/properties/integrate.go | 34 +
.../github.com/magiconair/properties/lex.go | 407 ++++
.../github.com/magiconair/properties/load.go | 293 +++
.../magiconair/properties/parser.go | 95 +
.../magiconair/properties/properties.go | 854 +++++++
.../magiconair/properties/rangecheck.go | 31 +
.../pelletier/go-toml/.dockerignore | 2 +
.../github.com/pelletier/go-toml/.gitignore | 5 +
.../pelletier/go-toml/CONTRIBUTING.md | 132 +
.../github.com/pelletier/go-toml/Dockerfile | 11 +
vendor/github.com/pelletier/go-toml/LICENSE | 247 ++
vendor/github.com/pelletier/go-toml/Makefile | 29 +
.../go-toml/PULL_REQUEST_TEMPLATE.md | 5 +
vendor/github.com/pelletier/go-toml/README.md | 176 ++
.../pelletier/go-toml/azure-pipelines.yml | 188 ++
.../github.com/pelletier/go-toml/benchmark.sh | 35 +
vendor/github.com/pelletier/go-toml/doc.go | 23 +
.../pelletier/go-toml/example-crlf.toml | 30 +
.../github.com/pelletier/go-toml/example.toml | 30 +
vendor/github.com/pelletier/go-toml/fuzz.go | 31 +
vendor/github.com/pelletier/go-toml/fuzz.sh | 15 +
.../pelletier/go-toml/keysparsing.go | 112 +
vendor/github.com/pelletier/go-toml/lexer.go | 1031 ++++++++
.../github.com/pelletier/go-toml/localtime.go | 287 +++
.../github.com/pelletier/go-toml/marshal.go | 1308 ++++++++++
.../go-toml/marshal_OrderPreserve_test.toml | 39 +
.../pelletier/go-toml/marshal_test.toml | 39 +
vendor/github.com/pelletier/go-toml/parser.go | 508 ++++
.../github.com/pelletier/go-toml/position.go | 29 +
vendor/github.com/pelletier/go-toml/token.go | 136 ++
vendor/github.com/pelletier/go-toml/toml.go | 533 ++++
.../github.com/pelletier/go-toml/tomlpub.go | 71 +
.../pelletier/go-toml/tomltree_create.go | 155 ++
.../pelletier/go-toml/tomltree_write.go | 552 +++++
.../pelletier/go-toml/tomltree_writepub.go | 6 +
vendor/github.com/spf13/afero/.gitignore | 2 +
vendor/github.com/spf13/afero/.travis.yml | 26 +
vendor/github.com/spf13/afero/LICENSE.txt | 174 ++
vendor/github.com/spf13/afero/README.md | 430 ++++
vendor/github.com/spf13/afero/afero.go | 111 +
vendor/github.com/spf13/afero/appveyor.yml | 15 +
vendor/github.com/spf13/afero/basepath.go | 211 ++
.../github.com/spf13/afero/cacheOnReadFs.go | 311 +++
vendor/github.com/spf13/afero/const_bsds.go | 22 +
.../github.com/spf13/afero/const_win_unix.go | 26 +
.../github.com/spf13/afero/copyOnWriteFs.go | 326 +++
vendor/github.com/spf13/afero/httpFs.go | 114 +
vendor/github.com/spf13/afero/iofs.go | 288 +++
vendor/github.com/spf13/afero/ioutil.go | 240 ++
vendor/github.com/spf13/afero/lstater.go | 27 +
vendor/github.com/spf13/afero/match.go | 110 +
vendor/github.com/spf13/afero/mem/dir.go | 37 +
vendor/github.com/spf13/afero/mem/dirmap.go | 43 +
vendor/github.com/spf13/afero/mem/file.go | 338 +++
vendor/github.com/spf13/afero/memmap.go | 404 +++
vendor/github.com/spf13/afero/os.go | 113 +
vendor/github.com/spf13/afero/path.go | 106 +
vendor/github.com/spf13/afero/readonlyfs.go | 96 +
vendor/github.com/spf13/afero/regexpfs.go | 224 ++
vendor/github.com/spf13/afero/symlink.go | 55 +
vendor/github.com/spf13/afero/unionFile.go | 317 +++
vendor/github.com/spf13/afero/util.go | 330 +++
vendor/github.com/spf13/cast/.gitignore | 25 +
.../v2/LICENSE.md => spf13/cast/LICENSE} | 4 +-
vendor/github.com/spf13/cast/Makefile | 40 +
vendor/github.com/spf13/cast/README.md | 75 +
vendor/github.com/spf13/cast/cast.go | 176 ++
vendor/github.com/spf13/cast/caste.go | 1337 ++++++++++
.../spf13/cast/timeformattype_string.go | 27 +
vendor/github.com/spf13/cobra/.gitignore | 39 +
vendor/github.com/spf13/cobra/.golangci.yml | 48 +
vendor/github.com/spf13/cobra/.mailmap | 3 +
vendor/github.com/spf13/cobra/CHANGELOG.md | 51 +
vendor/github.com/spf13/cobra/CONDUCT.md | 37 +
vendor/github.com/spf13/cobra/CONTRIBUTING.md | 50 +
vendor/github.com/spf13/cobra/LICENSE.txt | 174 ++
vendor/github.com/spf13/cobra/Makefile | 40 +
vendor/github.com/spf13/cobra/README.md | 125 +
vendor/github.com/spf13/cobra/args.go | 109 +
.../spf13/cobra/bash_completions.go | 685 ++++++
.../spf13/cobra/bash_completions.md | 93 +
.../spf13/cobra/bash_completionsV2.go | 302 +++
vendor/github.com/spf13/cobra/cobra.go | 222 ++
vendor/github.com/spf13/cobra/command.go | 1680 +++++++++++++
.../github.com/spf13/cobra/command_notwin.go | 5 +
vendor/github.com/spf13/cobra/command_win.go | 26 +
vendor/github.com/spf13/cobra/completions.go | 781 ++++++
.../spf13/cobra/fish_completions.go | 219 ++
.../spf13/cobra/fish_completions.md | 4 +
.../spf13/cobra/powershell_completions.go | 285 +++
.../spf13/cobra/powershell_completions.md | 3 +
.../spf13/cobra/projects_using_cobra.md | 38 +
.../spf13/cobra/shell_completions.go | 84 +
.../spf13/cobra/shell_completions.md | 546 +++++
vendor/github.com/spf13/cobra/user_guide.md | 637 +++++
.../github.com/spf13/cobra/zsh_completions.go | 258 ++
.../github.com/spf13/cobra/zsh_completions.md | 48 +
.../spf13/jwalterweatherman/.gitignore | 24 +
.../v2 => spf13/jwalterweatherman}/LICENSE | 6 +-
.../spf13/jwalterweatherman/README.md | 148 ++
.../jwalterweatherman/default_notepad.go | 111 +
.../spf13/jwalterweatherman/log_counter.go | 46 +
.../spf13/jwalterweatherman/notepad.go | 225 ++
vendor/github.com/spf13/pflag/.gitignore | 2 +
vendor/github.com/spf13/pflag/.travis.yml | 22 +
vendor/github.com/spf13/pflag/LICENSE | 28 +
vendor/github.com/spf13/pflag/README.md | 296 +++
vendor/github.com/spf13/pflag/bool.go | 94 +
vendor/github.com/spf13/pflag/bool_slice.go | 185 ++
vendor/github.com/spf13/pflag/bytes.go | 209 ++
vendor/github.com/spf13/pflag/count.go | 96 +
vendor/github.com/spf13/pflag/duration.go | 86 +
.../github.com/spf13/pflag/duration_slice.go | 166 ++
vendor/github.com/spf13/pflag/flag.go | 1239 ++++++++++
vendor/github.com/spf13/pflag/float32.go | 88 +
.../github.com/spf13/pflag/float32_slice.go | 174 ++
vendor/github.com/spf13/pflag/float64.go | 84 +
.../github.com/spf13/pflag/float64_slice.go | 166 ++
vendor/github.com/spf13/pflag/golangflag.go | 105 +
vendor/github.com/spf13/pflag/int.go | 84 +
vendor/github.com/spf13/pflag/int16.go | 88 +
vendor/github.com/spf13/pflag/int32.go | 88 +
vendor/github.com/spf13/pflag/int32_slice.go | 174 ++
vendor/github.com/spf13/pflag/int64.go | 84 +
vendor/github.com/spf13/pflag/int64_slice.go | 166 ++
vendor/github.com/spf13/pflag/int8.go | 88 +
vendor/github.com/spf13/pflag/int_slice.go | 158 ++
vendor/github.com/spf13/pflag/ip.go | 94 +
vendor/github.com/spf13/pflag/ip_slice.go | 186 ++
vendor/github.com/spf13/pflag/ipmask.go | 122 +
vendor/github.com/spf13/pflag/ipnet.go | 98 +
vendor/github.com/spf13/pflag/string.go | 80 +
vendor/github.com/spf13/pflag/string_array.go | 129 +
vendor/github.com/spf13/pflag/string_slice.go | 163 ++
.../github.com/spf13/pflag/string_to_int.go | 149 ++
.../github.com/spf13/pflag/string_to_int64.go | 149 ++
.../spf13/pflag/string_to_string.go | 160 ++
vendor/github.com/spf13/pflag/uint.go | 88 +
vendor/github.com/spf13/pflag/uint16.go | 88 +
vendor/github.com/spf13/pflag/uint32.go | 88 +
vendor/github.com/spf13/pflag/uint64.go | 88 +
vendor/github.com/spf13/pflag/uint8.go | 88 +
vendor/github.com/spf13/pflag/uint_slice.go | 168 ++
vendor/github.com/spf13/viper/.editorconfig | 15 +
vendor/github.com/spf13/viper/.gitignore | 5 +
vendor/github.com/spf13/viper/.golangci.yml | 93 +
vendor/github.com/spf13/viper/LICENSE | 21 +
vendor/github.com/spf13/viper/Makefile | 76 +
vendor/github.com/spf13/viper/README.md | 874 +++++++
.../github.com/spf13/viper/TROUBLESHOOTING.md | 23 +
vendor/github.com/spf13/viper/flags.go | 57 +
.../spf13/viper/internal/encoding/decoder.go | 61 +
.../spf13/viper/internal/encoding/encoder.go | 60 +
.../spf13/viper/internal/encoding/error.go | 7 +
.../viper/internal/encoding/hcl/codec.go | 40 +
.../viper/internal/encoding/json/codec.go | 17 +
.../viper/internal/encoding/toml/codec.go | 45 +
.../viper/internal/encoding/yaml/codec.go | 14 +
vendor/github.com/spf13/viper/util.go | 218 ++
vendor/github.com/spf13/viper/viper.go | 2156 +++++++++++++++++
vendor/github.com/spf13/viper/watch.go | 11 +
vendor/github.com/spf13/viper/watch_wasm.go | 30 +
vendor/github.com/subosito/gotenv/.env | 1 +
.../github.com/subosito/gotenv/.env.invalid | 1 +
vendor/github.com/subosito/gotenv/.gitignore | 3 +
vendor/github.com/subosito/gotenv/.travis.yml | 10 +
.../github.com/subosito/gotenv/CHANGELOG.md | 47 +
vendor/github.com/subosito/gotenv/LICENSE | 21 +
vendor/github.com/subosito/gotenv/README.md | 131 +
.../github.com/subosito/gotenv/appveyor.yml | 9 +
vendor/github.com/subosito/gotenv/gotenv.go | 265 ++
vendor/github.com/urfave/cli/v2/.flake8 | 2 -
vendor/github.com/urfave/cli/v2/.gitignore | 7 -
.../urfave/cli/v2/CODE_OF_CONDUCT.md | 74 -
vendor/github.com/urfave/cli/v2/README.md | 70 -
vendor/github.com/urfave/cli/v2/app.go | 540 -----
vendor/github.com/urfave/cli/v2/args.go | 54 -
vendor/github.com/urfave/cli/v2/category.go | 79 -
vendor/github.com/urfave/cli/v2/cli.go | 23 -
vendor/github.com/urfave/cli/v2/command.go | 300 ---
vendor/github.com/urfave/cli/v2/context.go | 273 ---
vendor/github.com/urfave/cli/v2/docs.go | 148 --
vendor/github.com/urfave/cli/v2/errors.go | 141 --
vendor/github.com/urfave/cli/v2/fish.go | 196 --
vendor/github.com/urfave/cli/v2/flag.go | 388 ---
vendor/github.com/urfave/cli/v2/flag_bool.go | 106 -
.../github.com/urfave/cli/v2/flag_duration.go | 105 -
.../github.com/urfave/cli/v2/flag_float64.go | 106 -
.../urfave/cli/v2/flag_float64_slice.go | 163 --
.../github.com/urfave/cli/v2/flag_generic.go | 108 -
vendor/github.com/urfave/cli/v2/flag_int.go | 106 -
vendor/github.com/urfave/cli/v2/flag_int64.go | 105 -
.../urfave/cli/v2/flag_int64_slice.go | 159 --
.../urfave/cli/v2/flag_int_slice.go | 173 --
vendor/github.com/urfave/cli/v2/flag_path.go | 95 -
.../github.com/urfave/cli/v2/flag_string.go | 95 -
.../urfave/cli/v2/flag_string_slice.go | 180 --
.../urfave/cli/v2/flag_timestamp.go | 154 --
vendor/github.com/urfave/cli/v2/flag_uint.go | 105 -
.../github.com/urfave/cli/v2/flag_uint64.go | 105 -
vendor/github.com/urfave/cli/v2/funcs.go | 44 -
vendor/github.com/urfave/cli/v2/help.go | 386 ---
vendor/github.com/urfave/cli/v2/parse.go | 94 -
vendor/github.com/urfave/cli/v2/sort.go | 29 -
vendor/github.com/urfave/cli/v2/template.go | 120 -
vendor/gopkg.in/ini.v1/.gitignore | 6 +
vendor/gopkg.in/ini.v1/.golangci.yml | 21 +
vendor/gopkg.in/ini.v1/LICENSE | 191 ++
vendor/gopkg.in/ini.v1/Makefile | 15 +
vendor/gopkg.in/ini.v1/README.md | 43 +
vendor/gopkg.in/ini.v1/codecov.yml | 9 +
vendor/gopkg.in/ini.v1/data_source.go | 76 +
vendor/gopkg.in/ini.v1/deprecated.go | 25 +
vendor/gopkg.in/ini.v1/error.go | 34 +
vendor/gopkg.in/ini.v1/file.go | 517 ++++
vendor/gopkg.in/ini.v1/helper.go | 24 +
vendor/gopkg.in/ini.v1/ini.go | 176 ++
vendor/gopkg.in/ini.v1/key.go | 828 +++++++
vendor/gopkg.in/ini.v1/parser.go | 531 ++++
vendor/gopkg.in/ini.v1/section.go | 256 ++
vendor/gopkg.in/ini.v1/struct.go | 747 ++++++
vendor/modules.txt | 66 +-
487 files changed, 46667 insertions(+), 8831 deletions(-)
delete mode 100644 cmd/gotosocial/accountsflags.go
rename {internal/cliactions => cmd/gotosocial/action}/action.go (87%)
rename {internal/cliactions => cmd/gotosocial/action}/admin/account/account.go (77%)
rename {internal/cliactions => cmd/gotosocial/action}/admin/trans/export.go (82%)
rename {internal/cliactions => cmd/gotosocial/action}/admin/trans/import.go (82%)
rename cmd/gotosocial/{commands.go => action/debug/config/config.go} (64%)
rename {internal/cliactions => cmd/gotosocial/action}/server/server.go (66%)
rename {internal/cliactions => cmd/gotosocial/action}/testrig/testrig.go (69%)
create mode 100644 cmd/gotosocial/admin.go
delete mode 100644 cmd/gotosocial/admincommands.go
create mode 100644 cmd/gotosocial/common.go
delete mode 100644 cmd/gotosocial/databaseflags.go
rename cmd/gotosocial/{flags.go => debug.go} (52%)
create mode 100644 cmd/gotosocial/flag/admin.go
create mode 100644 cmd/gotosocial/flag/global.go
create mode 100644 cmd/gotosocial/flag/server.go
create mode 100644 cmd/gotosocial/flag/usage.go
delete mode 100644 cmd/gotosocial/generalflags.go
delete mode 100644 cmd/gotosocial/letsencryptflags.go
delete mode 100644 cmd/gotosocial/mediaflags.go
delete mode 100644 cmd/gotosocial/oidcflags.go
delete mode 100644 cmd/gotosocial/runaction.go
rename cmd/gotosocial/{templateflags.go => server.go} (53%)
delete mode 100644 cmd/gotosocial/servercommands.go
delete mode 100644 cmd/gotosocial/smtpflags.go
delete mode 100644 cmd/gotosocial/statusesflags.go
delete mode 100644 cmd/gotosocial/storageflags.go
rename cmd/gotosocial/{testrigcommands.go => testrig.go} (60%)
delete mode 100644 docs/configuration/template.md
create mode 100644 docs/configuration/web.md
delete mode 100644 internal/config/config.go
delete mode 100644 internal/config/db.go
delete mode 100644 internal/config/default.go
create mode 100644 internal/config/defaults.go
rename internal/config/{oidc.go => file.go} (60%)
create mode 100644 internal/config/keys.go
delete mode 100644 internal/config/letsencrypt.go
delete mode 100644 internal/config/media.go
delete mode 100644 internal/config/smtp.go
delete mode 100644 internal/config/statuses.go
delete mode 100644 internal/config/storage.go
delete mode 100644 internal/config/template.go
create mode 100644 internal/config/values.go
rename internal/config/{accounts.go => viper.go} (60%)
create mode 100755 test/cliparsing.sh
create mode 100644 test/test.json
create mode 100644 test/test.yaml
create mode 100644 test/test2.yaml
delete mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
delete mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/.editorconfig
create mode 100644 vendor/github.com/fsnotify/fsnotify/.gitattributes
create mode 100644 vendor/github.com/fsnotify/fsnotify/.gitignore
create mode 100644 vendor/github.com/fsnotify/fsnotify/.mailmap
create mode 100644 vendor/github.com/fsnotify/fsnotify/AUTHORS
create mode 100644 vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
create mode 100644 vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
create mode 100644 vendor/github.com/fsnotify/fsnotify/LICENSE
create mode 100644 vendor/github.com/fsnotify/fsnotify/README.md
create mode 100644 vendor/github.com/fsnotify/fsnotify/fen.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/fsnotify.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/inotify.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/inotify_poller.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/kqueue.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/windows.go
create mode 100644 vendor/github.com/hashicorp/hcl/.gitignore
create mode 100644 vendor/github.com/hashicorp/hcl/.travis.yml
create mode 100644 vendor/github.com/hashicorp/hcl/LICENSE
create mode 100644 vendor/github.com/hashicorp/hcl/Makefile
create mode 100644 vendor/github.com/hashicorp/hcl/README.md
create mode 100644 vendor/github.com/hashicorp/hcl/appveyor.yml
create mode 100644 vendor/github.com/hashicorp/hcl/decoder.go
create mode 100644 vendor/github.com/hashicorp/hcl/hcl.go
create mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
create mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/error.go
create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
create mode 100644 vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
create mode 100644 vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
create mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/position.go
create mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/token.go
create mode 100644 vendor/github.com/hashicorp/hcl/json/parser/flatten.go
create mode 100644 vendor/github.com/hashicorp/hcl/json/parser/parser.go
create mode 100644 vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
create mode 100644 vendor/github.com/hashicorp/hcl/json/token/position.go
create mode 100644 vendor/github.com/hashicorp/hcl/json/token/token.go
create mode 100644 vendor/github.com/hashicorp/hcl/lex.go
create mode 100644 vendor/github.com/hashicorp/hcl/parse.go
create mode 100644 vendor/github.com/inconshreveable/mousetrap/LICENSE
create mode 100644 vendor/github.com/inconshreveable/mousetrap/README.md
create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_others.go
create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows.go
create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
create mode 100644 vendor/github.com/magiconair/properties/.gitignore
create mode 100644 vendor/github.com/magiconair/properties/.travis.yml
create mode 100644 vendor/github.com/magiconair/properties/CHANGELOG.md
create mode 100644 vendor/github.com/magiconair/properties/LICENSE.md
create mode 100644 vendor/github.com/magiconair/properties/README.md
create mode 100644 vendor/github.com/magiconair/properties/decode.go
create mode 100644 vendor/github.com/magiconair/properties/doc.go
create mode 100644 vendor/github.com/magiconair/properties/integrate.go
create mode 100644 vendor/github.com/magiconair/properties/lex.go
create mode 100644 vendor/github.com/magiconair/properties/load.go
create mode 100644 vendor/github.com/magiconair/properties/parser.go
create mode 100644 vendor/github.com/magiconair/properties/properties.go
create mode 100644 vendor/github.com/magiconair/properties/rangecheck.go
create mode 100644 vendor/github.com/pelletier/go-toml/.dockerignore
create mode 100644 vendor/github.com/pelletier/go-toml/.gitignore
create mode 100644 vendor/github.com/pelletier/go-toml/CONTRIBUTING.md
create mode 100644 vendor/github.com/pelletier/go-toml/Dockerfile
create mode 100644 vendor/github.com/pelletier/go-toml/LICENSE
create mode 100644 vendor/github.com/pelletier/go-toml/Makefile
create mode 100644 vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md
create mode 100644 vendor/github.com/pelletier/go-toml/README.md
create mode 100644 vendor/github.com/pelletier/go-toml/azure-pipelines.yml
create mode 100644 vendor/github.com/pelletier/go-toml/benchmark.sh
create mode 100644 vendor/github.com/pelletier/go-toml/doc.go
create mode 100644 vendor/github.com/pelletier/go-toml/example-crlf.toml
create mode 100644 vendor/github.com/pelletier/go-toml/example.toml
create mode 100644 vendor/github.com/pelletier/go-toml/fuzz.go
create mode 100644 vendor/github.com/pelletier/go-toml/fuzz.sh
create mode 100644 vendor/github.com/pelletier/go-toml/keysparsing.go
create mode 100644 vendor/github.com/pelletier/go-toml/lexer.go
create mode 100644 vendor/github.com/pelletier/go-toml/localtime.go
create mode 100644 vendor/github.com/pelletier/go-toml/marshal.go
create mode 100644 vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml
create mode 100644 vendor/github.com/pelletier/go-toml/marshal_test.toml
create mode 100644 vendor/github.com/pelletier/go-toml/parser.go
create mode 100644 vendor/github.com/pelletier/go-toml/position.go
create mode 100644 vendor/github.com/pelletier/go-toml/token.go
create mode 100644 vendor/github.com/pelletier/go-toml/toml.go
create mode 100644 vendor/github.com/pelletier/go-toml/tomlpub.go
create mode 100644 vendor/github.com/pelletier/go-toml/tomltree_create.go
create mode 100644 vendor/github.com/pelletier/go-toml/tomltree_write.go
create mode 100644 vendor/github.com/pelletier/go-toml/tomltree_writepub.go
create mode 100644 vendor/github.com/spf13/afero/.gitignore
create mode 100644 vendor/github.com/spf13/afero/.travis.yml
create mode 100644 vendor/github.com/spf13/afero/LICENSE.txt
create mode 100644 vendor/github.com/spf13/afero/README.md
create mode 100644 vendor/github.com/spf13/afero/afero.go
create mode 100644 vendor/github.com/spf13/afero/appveyor.yml
create mode 100644 vendor/github.com/spf13/afero/basepath.go
create mode 100644 vendor/github.com/spf13/afero/cacheOnReadFs.go
create mode 100644 vendor/github.com/spf13/afero/const_bsds.go
create mode 100644 vendor/github.com/spf13/afero/const_win_unix.go
create mode 100644 vendor/github.com/spf13/afero/copyOnWriteFs.go
create mode 100644 vendor/github.com/spf13/afero/httpFs.go
create mode 100644 vendor/github.com/spf13/afero/iofs.go
create mode 100644 vendor/github.com/spf13/afero/ioutil.go
create mode 100644 vendor/github.com/spf13/afero/lstater.go
create mode 100644 vendor/github.com/spf13/afero/match.go
create mode 100644 vendor/github.com/spf13/afero/mem/dir.go
create mode 100644 vendor/github.com/spf13/afero/mem/dirmap.go
create mode 100644 vendor/github.com/spf13/afero/mem/file.go
create mode 100644 vendor/github.com/spf13/afero/memmap.go
create mode 100644 vendor/github.com/spf13/afero/os.go
create mode 100644 vendor/github.com/spf13/afero/path.go
create mode 100644 vendor/github.com/spf13/afero/readonlyfs.go
create mode 100644 vendor/github.com/spf13/afero/regexpfs.go
create mode 100644 vendor/github.com/spf13/afero/symlink.go
create mode 100644 vendor/github.com/spf13/afero/unionFile.go
create mode 100644 vendor/github.com/spf13/afero/util.go
create mode 100644 vendor/github.com/spf13/cast/.gitignore
rename vendor/github.com/{cpuguy83/go-md2man/v2/LICENSE.md => spf13/cast/LICENSE} (96%)
create mode 100644 vendor/github.com/spf13/cast/Makefile
create mode 100644 vendor/github.com/spf13/cast/README.md
create mode 100644 vendor/github.com/spf13/cast/cast.go
create mode 100644 vendor/github.com/spf13/cast/caste.go
create mode 100644 vendor/github.com/spf13/cast/timeformattype_string.go
create mode 100644 vendor/github.com/spf13/cobra/.gitignore
create mode 100644 vendor/github.com/spf13/cobra/.golangci.yml
create mode 100644 vendor/github.com/spf13/cobra/.mailmap
create mode 100644 vendor/github.com/spf13/cobra/CHANGELOG.md
create mode 100644 vendor/github.com/spf13/cobra/CONDUCT.md
create mode 100644 vendor/github.com/spf13/cobra/CONTRIBUTING.md
create mode 100644 vendor/github.com/spf13/cobra/LICENSE.txt
create mode 100644 vendor/github.com/spf13/cobra/Makefile
create mode 100644 vendor/github.com/spf13/cobra/README.md
create mode 100644 vendor/github.com/spf13/cobra/args.go
create mode 100644 vendor/github.com/spf13/cobra/bash_completions.go
create mode 100644 vendor/github.com/spf13/cobra/bash_completions.md
create mode 100644 vendor/github.com/spf13/cobra/bash_completionsV2.go
create mode 100644 vendor/github.com/spf13/cobra/cobra.go
create mode 100644 vendor/github.com/spf13/cobra/command.go
create mode 100644 vendor/github.com/spf13/cobra/command_notwin.go
create mode 100644 vendor/github.com/spf13/cobra/command_win.go
create mode 100644 vendor/github.com/spf13/cobra/completions.go
create mode 100644 vendor/github.com/spf13/cobra/fish_completions.go
create mode 100644 vendor/github.com/spf13/cobra/fish_completions.md
create mode 100644 vendor/github.com/spf13/cobra/powershell_completions.go
create mode 100644 vendor/github.com/spf13/cobra/powershell_completions.md
create mode 100644 vendor/github.com/spf13/cobra/projects_using_cobra.md
create mode 100644 vendor/github.com/spf13/cobra/shell_completions.go
create mode 100644 vendor/github.com/spf13/cobra/shell_completions.md
create mode 100644 vendor/github.com/spf13/cobra/user_guide.md
create mode 100644 vendor/github.com/spf13/cobra/zsh_completions.go
create mode 100644 vendor/github.com/spf13/cobra/zsh_completions.md
create mode 100644 vendor/github.com/spf13/jwalterweatherman/.gitignore
rename vendor/github.com/{urfave/cli/v2 => spf13/jwalterweatherman}/LICENSE (93%)
create mode 100644 vendor/github.com/spf13/jwalterweatherman/README.md
create mode 100644 vendor/github.com/spf13/jwalterweatherman/default_notepad.go
create mode 100644 vendor/github.com/spf13/jwalterweatherman/log_counter.go
create mode 100644 vendor/github.com/spf13/jwalterweatherman/notepad.go
create mode 100644 vendor/github.com/spf13/pflag/.gitignore
create mode 100644 vendor/github.com/spf13/pflag/.travis.yml
create mode 100644 vendor/github.com/spf13/pflag/LICENSE
create mode 100644 vendor/github.com/spf13/pflag/README.md
create mode 100644 vendor/github.com/spf13/pflag/bool.go
create mode 100644 vendor/github.com/spf13/pflag/bool_slice.go
create mode 100644 vendor/github.com/spf13/pflag/bytes.go
create mode 100644 vendor/github.com/spf13/pflag/count.go
create mode 100644 vendor/github.com/spf13/pflag/duration.go
create mode 100644 vendor/github.com/spf13/pflag/duration_slice.go
create mode 100644 vendor/github.com/spf13/pflag/flag.go
create mode 100644 vendor/github.com/spf13/pflag/float32.go
create mode 100644 vendor/github.com/spf13/pflag/float32_slice.go
create mode 100644 vendor/github.com/spf13/pflag/float64.go
create mode 100644 vendor/github.com/spf13/pflag/float64_slice.go
create mode 100644 vendor/github.com/spf13/pflag/golangflag.go
create mode 100644 vendor/github.com/spf13/pflag/int.go
create mode 100644 vendor/github.com/spf13/pflag/int16.go
create mode 100644 vendor/github.com/spf13/pflag/int32.go
create mode 100644 vendor/github.com/spf13/pflag/int32_slice.go
create mode 100644 vendor/github.com/spf13/pflag/int64.go
create mode 100644 vendor/github.com/spf13/pflag/int64_slice.go
create mode 100644 vendor/github.com/spf13/pflag/int8.go
create mode 100644 vendor/github.com/spf13/pflag/int_slice.go
create mode 100644 vendor/github.com/spf13/pflag/ip.go
create mode 100644 vendor/github.com/spf13/pflag/ip_slice.go
create mode 100644 vendor/github.com/spf13/pflag/ipmask.go
create mode 100644 vendor/github.com/spf13/pflag/ipnet.go
create mode 100644 vendor/github.com/spf13/pflag/string.go
create mode 100644 vendor/github.com/spf13/pflag/string_array.go
create mode 100644 vendor/github.com/spf13/pflag/string_slice.go
create mode 100644 vendor/github.com/spf13/pflag/string_to_int.go
create mode 100644 vendor/github.com/spf13/pflag/string_to_int64.go
create mode 100644 vendor/github.com/spf13/pflag/string_to_string.go
create mode 100644 vendor/github.com/spf13/pflag/uint.go
create mode 100644 vendor/github.com/spf13/pflag/uint16.go
create mode 100644 vendor/github.com/spf13/pflag/uint32.go
create mode 100644 vendor/github.com/spf13/pflag/uint64.go
create mode 100644 vendor/github.com/spf13/pflag/uint8.go
create mode 100644 vendor/github.com/spf13/pflag/uint_slice.go
create mode 100644 vendor/github.com/spf13/viper/.editorconfig
create mode 100644 vendor/github.com/spf13/viper/.gitignore
create mode 100644 vendor/github.com/spf13/viper/.golangci.yml
create mode 100644 vendor/github.com/spf13/viper/LICENSE
create mode 100644 vendor/github.com/spf13/viper/Makefile
create mode 100644 vendor/github.com/spf13/viper/README.md
create mode 100644 vendor/github.com/spf13/viper/TROUBLESHOOTING.md
create mode 100644 vendor/github.com/spf13/viper/flags.go
create mode 100644 vendor/github.com/spf13/viper/internal/encoding/decoder.go
create mode 100644 vendor/github.com/spf13/viper/internal/encoding/encoder.go
create mode 100644 vendor/github.com/spf13/viper/internal/encoding/error.go
create mode 100644 vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go
create mode 100644 vendor/github.com/spf13/viper/internal/encoding/json/codec.go
create mode 100644 vendor/github.com/spf13/viper/internal/encoding/toml/codec.go
create mode 100644 vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go
create mode 100644 vendor/github.com/spf13/viper/util.go
create mode 100644 vendor/github.com/spf13/viper/viper.go
create mode 100644 vendor/github.com/spf13/viper/watch.go
create mode 100644 vendor/github.com/spf13/viper/watch_wasm.go
create mode 100644 vendor/github.com/subosito/gotenv/.env
create mode 100644 vendor/github.com/subosito/gotenv/.env.invalid
create mode 100644 vendor/github.com/subosito/gotenv/.gitignore
create mode 100644 vendor/github.com/subosito/gotenv/.travis.yml
create mode 100644 vendor/github.com/subosito/gotenv/CHANGELOG.md
create mode 100644 vendor/github.com/subosito/gotenv/LICENSE
create mode 100644 vendor/github.com/subosito/gotenv/README.md
create mode 100644 vendor/github.com/subosito/gotenv/appveyor.yml
create mode 100644 vendor/github.com/subosito/gotenv/gotenv.go
delete mode 100644 vendor/github.com/urfave/cli/v2/.flake8
delete mode 100644 vendor/github.com/urfave/cli/v2/.gitignore
delete mode 100644 vendor/github.com/urfave/cli/v2/CODE_OF_CONDUCT.md
delete mode 100644 vendor/github.com/urfave/cli/v2/README.md
delete mode 100644 vendor/github.com/urfave/cli/v2/app.go
delete mode 100644 vendor/github.com/urfave/cli/v2/args.go
delete mode 100644 vendor/github.com/urfave/cli/v2/category.go
delete mode 100644 vendor/github.com/urfave/cli/v2/cli.go
delete mode 100644 vendor/github.com/urfave/cli/v2/command.go
delete mode 100644 vendor/github.com/urfave/cli/v2/context.go
delete mode 100644 vendor/github.com/urfave/cli/v2/docs.go
delete mode 100644 vendor/github.com/urfave/cli/v2/errors.go
delete mode 100644 vendor/github.com/urfave/cli/v2/fish.go
delete mode 100644 vendor/github.com/urfave/cli/v2/flag.go
delete mode 100644 vendor/github.com/urfave/cli/v2/flag_bool.go
delete mode 100644 vendor/github.com/urfave/cli/v2/flag_duration.go
delete mode 100644 vendor/github.com/urfave/cli/v2/flag_float64.go
delete mode 100644 vendor/github.com/urfave/cli/v2/flag_float64_slice.go
delete mode 100644 vendor/github.com/urfave/cli/v2/flag_generic.go
delete mode 100644 vendor/github.com/urfave/cli/v2/flag_int.go
delete mode 100644 vendor/github.com/urfave/cli/v2/flag_int64.go
delete mode 100644 vendor/github.com/urfave/cli/v2/flag_int64_slice.go
delete mode 100644 vendor/github.com/urfave/cli/v2/flag_int_slice.go
delete mode 100644 vendor/github.com/urfave/cli/v2/flag_path.go
delete mode 100644 vendor/github.com/urfave/cli/v2/flag_string.go
delete mode 100644 vendor/github.com/urfave/cli/v2/flag_string_slice.go
delete mode 100644 vendor/github.com/urfave/cli/v2/flag_timestamp.go
delete mode 100644 vendor/github.com/urfave/cli/v2/flag_uint.go
delete mode 100644 vendor/github.com/urfave/cli/v2/flag_uint64.go
delete mode 100644 vendor/github.com/urfave/cli/v2/funcs.go
delete mode 100644 vendor/github.com/urfave/cli/v2/help.go
delete mode 100644 vendor/github.com/urfave/cli/v2/parse.go
delete mode 100644 vendor/github.com/urfave/cli/v2/sort.go
delete mode 100644 vendor/github.com/urfave/cli/v2/template.go
create mode 100644 vendor/gopkg.in/ini.v1/.gitignore
create mode 100644 vendor/gopkg.in/ini.v1/.golangci.yml
create mode 100644 vendor/gopkg.in/ini.v1/LICENSE
create mode 100644 vendor/gopkg.in/ini.v1/Makefile
create mode 100644 vendor/gopkg.in/ini.v1/README.md
create mode 100644 vendor/gopkg.in/ini.v1/codecov.yml
create mode 100644 vendor/gopkg.in/ini.v1/data_source.go
create mode 100644 vendor/gopkg.in/ini.v1/deprecated.go
create mode 100644 vendor/gopkg.in/ini.v1/error.go
create mode 100644 vendor/gopkg.in/ini.v1/file.go
create mode 100644 vendor/gopkg.in/ini.v1/helper.go
create mode 100644 vendor/gopkg.in/ini.v1/ini.go
create mode 100644 vendor/gopkg.in/ini.v1/key.go
create mode 100644 vendor/gopkg.in/ini.v1/parser.go
create mode 100644 vendor/gopkg.in/ini.v1/section.go
create mode 100644 vendor/gopkg.in/ini.v1/struct.go
diff --git a/.drone.yml b/.drone.yml
index 7f82fa497..7e30bd178 100644
--- a/.drone.yml
+++ b/.drone.yml
@@ -37,6 +37,7 @@ steps:
path: /go
commands:
- CGO_ENABLED=0 GTS_DB_TYPE="sqlite" GTS_DB_ADDRESS=":memory:" go test ./...
+ - CGO_ENABLED=0 ./test/cliparsing.sh
when:
event:
include:
@@ -115,6 +116,6 @@ trigger:
---
kind: signature
-hmac: 8a363baa3c7b5e581bd101a7774422042833b7d297faf5d93c9156cf54a31124
+hmac: 3c989818a1940fc572110fa35b76b6ea82e9c45a3b1074e3af6328550f12674c
...
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index dc03a1eae..0c19dd987 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -181,6 +181,12 @@ Finally, to run tests against both database types one after the other, use:
./scripts/test.sh
```
+### CLI Tests
+
+In [./test/cliparsing.sh](./test/cliparsing.sh) there are a bunch of tests for making sure that CLI flags, config, and environment variables get parsed as expected.
+
+Although these tests *are* part of the CI/CD testing process, you probably won't need to worry too much about running them yourself. That is, unless you're messing about with code inside the `main` package in `cmd/gotosocial`, or inside the `config` package in `internal/config`.
+
## Project Structure
For project structure, GoToSocial follows a standard and widely-accepted project layout [defined here](https://github.com/golang-standards/project-layout). As the author writes:
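The shell checks in cliparsing.sh are not reproduced here, but the precedence behaviour they exercise can be pictured with a small Go sketch. This is an illustration only, not code from the test script: the `GTS_` env prefix matches the variables used elsewhere in the patch (e.g. GTS_DB_TYPE in .drone.yml), while the `host` key is an assumed example.

```go
package config_test

import (
	"os"
	"strings"
	"testing"

	"github.com/spf13/viper"
)

// TestEnvOverridesDefault sketches the behaviour the CLI parsing tests check:
// an environment variable with the GTS_ prefix should win over a default
// (or config file) value for the same key.
func TestEnvOverridesDefault(t *testing.T) {
	v := viper.New()
	v.SetEnvPrefix("gts")
	v.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
	v.AutomaticEnv()

	v.SetDefault("host", "example.org")
	os.Setenv("GTS_HOST", "overridden.example.org")
	defer os.Unsetenv("GTS_HOST")

	if got := v.GetString("host"); got != "overridden.example.org" {
		t.Fatalf("expected env value to take precedence, got %q", got)
	}
}
```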
diff --git a/README.md b/README.md
index 1ee0d2d22..a430edb11 100644
--- a/README.md
+++ b/README.md
@@ -196,6 +196,9 @@ The following libraries and frameworks are used by GoToSocial, with gratitude
- [ReneKroon/ttlcache](https://github.com/ReneKroon/ttlcache); in-memory caching. [MIT License](https://spdx.org/licenses/MIT.html).
- [russross/blackfriday](https://github.com/russross/blackfriday); markdown parsing for statuses. [Simplified BSD License](https://spdx.org/licenses/BSD-2-Clause.html).
- [sirupsen/logrus](https://github.com/sirupsen/logrus); logging. [MIT License](https://spdx.org/licenses/MIT.html).
+- [spf13/cobra](https://github.com/spf13/cobra); command-line tooling. [Apache-2.0 License](https://spdx.org/licenses/Apache-2.0.html).
+- [spf13/pflag](https://github.com/spf13/pflag); command-line flag utilities. [Apache-2.0 License](https://spdx.org/licenses/Apache-2.0.html).
+- [spf13/viper](https://github.com/spf13/viper); configuration management. [Apache-2.0 License](https://spdx.org/licenses/Apache-2.0.html).
- [stretchr/testify](https://github.com/stretchr/testify); test framework. [MIT License](https://spdx.org/licenses/MIT.html).
- [superseriousbusiness/exifremove](https://github.com/superseriousbusiness/exifremove) forked from [scottleedavis/go-exif-remove](https://github.com/scottleedavis/go-exif-remove); EXIF data removal. [MIT License](https://spdx.org/licenses/MIT.html).
- [superseriousbusiness/activity](https://github.com/superseriousbusiness/activity) forked from [go-fed/activity](https://github.com/go-fed/activity); Golang ActivityPub/ActivityStreams library. [BSD-3-Clause License](https://spdx.org/licenses/BSD-3-Clause.html).
@@ -203,7 +206,6 @@ The following libraries and frameworks are used by GoToSocial, with gratitude
- [go-swagger/go-swagger](https://github.com/go-swagger/go-swagger); Swagger OpenAPI spec generation. [Apache-2.0 License](https://spdx.org/licenses/Apache-2.0.html).
- [tdewolff/minify](https://github.com/tdewolff/minify); HTML minification. [MIT License](https://spdx.org/licenses/MIT.html).
- [uptrace/bun](https://github.com/uptrace/bun); database ORM. [BSD-2-Clause License](https://spdx.org/licenses/BSD-2-Clause.html).
-- [urfave/cli](https://github.com/urfave/cli); command-line interface framework. [MIT License](https://spdx.org/licenses/MIT.html).
- [wagslane/go-password-validator](https://github.com/wagslane/go-password-validator); password strength validation. [MIT License](https://spdx.org/licenses/MIT.html).
### Image Attribution
diff --git a/cmd/gotosocial/accountsflags.go b/cmd/gotosocial/accountsflags.go
deleted file mode 100644
index 9f9be266d..000000000
--- a/cmd/gotosocial/accountsflags.go
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- GoToSocial
- Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-package main
-
-import (
- "github.com/superseriousbusiness/gotosocial/internal/config"
- "github.com/urfave/cli/v2"
-)
-
-func accountsFlags(flagNames, envNames config.Flags, defaults config.Defaults) []cli.Flag {
- return []cli.Flag{
- &cli.BoolFlag{
- Name: flagNames.AccountsOpenRegistration,
- Usage: "Allow anyone to submit an account signup request. If false, server will be invite-only.",
- Value: defaults.AccountsOpenRegistration,
- EnvVars: []string{envNames.AccountsOpenRegistration},
- },
- &cli.BoolFlag{
- Name: flagNames.AccountsApprovalRequired,
- Usage: "Do account signups require approval by an admin or moderator before user can log in? If false, new registrations will be automatically approved.",
- Value: defaults.AccountsRequireApproval,
- EnvVars: []string{envNames.AccountsApprovalRequired},
- },
- &cli.BoolFlag{
- Name: flagNames.AccountsReasonRequired,
- Usage: "Do new account signups require a reason to be submitted on registration?",
- Value: defaults.AccountsReasonRequired,
- EnvVars: []string{envNames.AccountsReasonRequired},
- },
- }
-}
diff --git a/internal/cliactions/action.go b/cmd/gotosocial/action/action.go
similarity index 87%
rename from internal/cliactions/action.go
rename to cmd/gotosocial/action/action.go
index b72ae92b8..315b30de6 100644
--- a/internal/cliactions/action.go
+++ b/cmd/gotosocial/action/action.go
@@ -16,15 +16,13 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-package cliactions
+package action
import (
"context"
-
- "github.com/superseriousbusiness/gotosocial/internal/config"
)
// GTSAction defines one *action* that can be taken by the gotosocial cli command.
// This can be either a long-running action (like server start) or something
// shorter like db init or db inspect.
-type GTSAction func(context.Context, *config.Config) error
+type GTSAction func(context.Context) error
diff --git a/internal/cliactions/admin/account/account.go b/cmd/gotosocial/action/admin/account/account.go
similarity index 77%
rename from internal/cliactions/admin/account/account.go
rename to cmd/gotosocial/action/admin/account/account.go
index 5bdee9a41..6b1029712 100644
--- a/internal/cliactions/admin/account/account.go
+++ b/cmd/gotosocial/action/admin/account/account.go
@@ -24,7 +24,8 @@ import (
"fmt"
"time"
- "github.com/superseriousbusiness/gotosocial/internal/cliactions"
+ "github.com/spf13/viper"
+ "github.com/superseriousbusiness/gotosocial/cmd/gotosocial/action"
"github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/db/bundb"
@@ -34,30 +35,30 @@ import (
)
// Create creates a new account in the database using the provided flags.
-var Create cliactions.GTSAction = func(ctx context.Context, c *config.Config) error {
- dbConn, err := bundb.NewBunDBService(ctx, c)
+var Create action.GTSAction = func(ctx context.Context) error {
+ dbConn, err := bundb.NewBunDBService(ctx)
if err != nil {
return fmt.Errorf("error creating dbservice: %s", err)
}
- username, ok := c.AccountCLIFlags[config.UsernameFlag]
- if !ok {
+ username := viper.GetString(config.Keys.AdminAccountUsername)
+ if username == "" {
return errors.New("no username set")
}
if err := validate.Username(username); err != nil {
return err
}
- email, ok := c.AccountCLIFlags[config.EmailFlag]
- if !ok {
+ email := viper.GetString(config.Keys.AdminAccountEmail)
+ if email == "" {
return errors.New("no email set")
}
if err := validate.Email(email); err != nil {
return err
}
- password, ok := c.AccountCLIFlags[config.PasswordFlag]
- if !ok {
+ password := viper.GetString(config.Keys.AdminAccountPassword)
+ if password == "" {
return errors.New("no password set")
}
if err := validate.NewPassword(password); err != nil {
@@ -73,14 +74,14 @@ var Create cliactions.GTSAction = func(ctx context.Context, c *config.Config) er
}
// Confirm sets a user to Approved, sets Email to the current UnconfirmedEmail value, and sets ConfirmedAt to now.
-var Confirm cliactions.GTSAction = func(ctx context.Context, c *config.Config) error {
- dbConn, err := bundb.NewBunDBService(ctx, c)
+var Confirm action.GTSAction = func(ctx context.Context) error {
+ dbConn, err := bundb.NewBunDBService(ctx)
if err != nil {
return fmt.Errorf("error creating dbservice: %s", err)
}
- username, ok := c.AccountCLIFlags[config.UsernameFlag]
- if !ok {
+ username := viper.GetString(config.Keys.AdminAccountUsername)
+ if username == "" {
return errors.New("no username set")
}
if err := validate.Username(username); err != nil {
@@ -108,14 +109,14 @@ var Confirm cliactions.GTSAction = func(ctx context.Context, c *config.Config) e
}
// Promote sets a user to admin.
-var Promote cliactions.GTSAction = func(ctx context.Context, c *config.Config) error {
- dbConn, err := bundb.NewBunDBService(ctx, c)
+var Promote action.GTSAction = func(ctx context.Context) error {
+ dbConn, err := bundb.NewBunDBService(ctx)
if err != nil {
return fmt.Errorf("error creating dbservice: %s", err)
}
- username, ok := c.AccountCLIFlags[config.UsernameFlag]
- if !ok {
+ username := viper.GetString(config.Keys.AdminAccountUsername)
+ if username == "" {
return errors.New("no username set")
}
if err := validate.Username(username); err != nil {
@@ -140,14 +141,14 @@ var Promote cliactions.GTSAction = func(ctx context.Context, c *config.Config) e
}
// Demote sets admin on a user to false.
-var Demote cliactions.GTSAction = func(ctx context.Context, c *config.Config) error {
- dbConn, err := bundb.NewBunDBService(ctx, c)
+var Demote action.GTSAction = func(ctx context.Context) error {
+ dbConn, err := bundb.NewBunDBService(ctx)
if err != nil {
return fmt.Errorf("error creating dbservice: %s", err)
}
- username, ok := c.AccountCLIFlags[config.UsernameFlag]
- if !ok {
+ username := viper.GetString(config.Keys.AdminAccountUsername)
+ if username == "" {
return errors.New("no username set")
}
if err := validate.Username(username); err != nil {
@@ -172,14 +173,14 @@ var Demote cliactions.GTSAction = func(ctx context.Context, c *config.Config) er
}
// Disable sets Disabled to true on a user.
-var Disable cliactions.GTSAction = func(ctx context.Context, c *config.Config) error {
- dbConn, err := bundb.NewBunDBService(ctx, c)
+var Disable action.GTSAction = func(ctx context.Context) error {
+ dbConn, err := bundb.NewBunDBService(ctx)
if err != nil {
return fmt.Errorf("error creating dbservice: %s", err)
}
- username, ok := c.AccountCLIFlags[config.UsernameFlag]
- if !ok {
+ username := viper.GetString(config.Keys.AdminAccountUsername)
+ if username == "" {
return errors.New("no username set")
}
if err := validate.Username(username); err != nil {
@@ -204,28 +205,28 @@ var Disable cliactions.GTSAction = func(ctx context.Context, c *config.Config) e
}
// Suspend suspends the target account, cleanly removing all of its media, followers, following, likes, statuses, etc.
-var Suspend cliactions.GTSAction = func(ctx context.Context, c *config.Config) error {
+var Suspend action.GTSAction = func(ctx context.Context) error {
// TODO
return nil
}
// Password sets the password of target account.
-var Password cliactions.GTSAction = func(ctx context.Context, c *config.Config) error {
- dbConn, err := bundb.NewBunDBService(ctx, c)
+var Password action.GTSAction = func(ctx context.Context) error {
+ dbConn, err := bundb.NewBunDBService(ctx)
if err != nil {
return fmt.Errorf("error creating dbservice: %s", err)
}
- username, ok := c.AccountCLIFlags[config.UsernameFlag]
- if !ok {
+ username := viper.GetString(config.Keys.AdminAccountUsername)
+ if username == "" {
return errors.New("no username set")
}
if err := validate.Username(username); err != nil {
return err
}
- password, ok := c.AccountCLIFlags[config.PasswordFlag]
- if !ok {
+ password := viper.GetString(config.Keys.AdminAccountPassword)
+ if password == "" {
return errors.New("no password set")
}
if err := validate.NewPassword(password); err != nil {
diff --git a/internal/cliactions/admin/trans/export.go b/cmd/gotosocial/action/admin/trans/export.go
similarity index 82%
rename from internal/cliactions/admin/trans/export.go
rename to cmd/gotosocial/action/admin/trans/export.go
index 89a22a357..615ba72f0 100644
--- a/internal/cliactions/admin/trans/export.go
+++ b/cmd/gotosocial/action/admin/trans/export.go
@@ -23,23 +23,24 @@ import (
"errors"
"fmt"
- "github.com/superseriousbusiness/gotosocial/internal/cliactions"
+ "github.com/spf13/viper"
+ "github.com/superseriousbusiness/gotosocial/cmd/gotosocial/action"
"github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db/bundb"
"github.com/superseriousbusiness/gotosocial/internal/trans"
)
// Export exports info from the database into a file
-var Export cliactions.GTSAction = func(ctx context.Context, c *config.Config) error {
- dbConn, err := bundb.NewBunDBService(ctx, c)
+var Export action.GTSAction = func(ctx context.Context) error {
+ dbConn, err := bundb.NewBunDBService(ctx)
if err != nil {
return fmt.Errorf("error creating dbservice: %s", err)
}
exporter := trans.NewExporter(dbConn)
- path, ok := c.ExportCLIFlags[config.TransPathFlag]
- if !ok {
+ path := viper.GetString(config.Keys.AdminTransPath)
+ if path == "" {
return errors.New("no path set")
}
diff --git a/internal/cliactions/admin/trans/import.go b/cmd/gotosocial/action/admin/trans/import.go
similarity index 82%
rename from internal/cliactions/admin/trans/import.go
rename to cmd/gotosocial/action/admin/trans/import.go
index 9ab85d62f..688083130 100644
--- a/internal/cliactions/admin/trans/import.go
+++ b/cmd/gotosocial/action/admin/trans/import.go
@@ -23,23 +23,24 @@ import (
"errors"
"fmt"
- "github.com/superseriousbusiness/gotosocial/internal/cliactions"
+ "github.com/spf13/viper"
+ "github.com/superseriousbusiness/gotosocial/cmd/gotosocial/action"
"github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db/bundb"
"github.com/superseriousbusiness/gotosocial/internal/trans"
)
// Import imports info from a file into the database
-var Import cliactions.GTSAction = func(ctx context.Context, c *config.Config) error {
- dbConn, err := bundb.NewBunDBService(ctx, c)
+var Import action.GTSAction = func(ctx context.Context) error {
+ dbConn, err := bundb.NewBunDBService(ctx)
if err != nil {
return fmt.Errorf("error creating dbservice: %s", err)
}
importer := trans.NewImporter(dbConn)
- path, ok := c.ExportCLIFlags[config.TransPathFlag]
- if !ok {
+ path := viper.GetString(config.Keys.AdminTransPath)
+ if path == "" {
return errors.New("no path set")
}
diff --git a/cmd/gotosocial/commands.go b/cmd/gotosocial/action/debug/config/config.go
similarity index 64%
rename from cmd/gotosocial/commands.go
rename to cmd/gotosocial/action/debug/config/config.go
index 9b61a66ec..749ea124d 100644
--- a/cmd/gotosocial/commands.go
+++ b/cmd/gotosocial/action/debug/config/config.go
@@ -16,22 +16,24 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-package main
+package config
import (
- "github.com/urfave/cli/v2"
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/spf13/viper"
+ "github.com/superseriousbusiness/gotosocial/cmd/gotosocial/action"
)
-func getCommands(allFlags []cli.Flag) []*cli.Command {
- commands := []*cli.Command{}
- commandSets := [][]*cli.Command{
- serverCommands(allFlags),
- adminCommands(allFlags),
- testrigCommands(allFlags),
+// Config just prints the collated config out to stdout as json.
+var Config action.GTSAction = func(ctx context.Context) error {
+ allSettings := viper.AllSettings()
+ b, err := json.Marshal(&allSettings)
+ if err != nil {
+ return err
}
- for _, cs := range commandSets {
- commands = append(commands, cs...)
- }
-
- return commands
+ fmt.Println(string(b))
+ return nil
}
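The debug config action above dumps viper.AllSettings() as JSON. A self-contained sketch of the same idea, with illustrative keys and indented output for readability:
// Sketch only: the keys set here are illustrative, not real GtS config keys.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	viper.SetDefault("host", "example.org")
	viper.Set("port", 8080)

	// AllSettings collates defaults, flag bindings, env vars and config-file
	// values into one map, which is what the debug command prints.
	b, err := json.MarshalIndent(viper.AllSettings(), "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}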
diff --git a/internal/cliactions/server/server.go b/cmd/gotosocial/action/server/server.go
similarity index 66%
rename from internal/cliactions/server/server.go
rename to cmd/gotosocial/action/server/server.go
index 3e6acfe84..57384ac6f 100644
--- a/internal/cliactions/server/server.go
+++ b/cmd/gotosocial/action/server/server.go
@@ -1,3 +1,21 @@
+/*
+ GoToSocial
+ Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
package server
import (
@@ -10,6 +28,8 @@ import (
"codeberg.org/gruf/go-store/kv"
"github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
+ "github.com/superseriousbusiness/gotosocial/cmd/gotosocial/action"
"github.com/superseriousbusiness/gotosocial/internal/api"
"github.com/superseriousbusiness/gotosocial/internal/api/client/account"
"github.com/superseriousbusiness/gotosocial/internal/api/client/admin"
@@ -34,7 +54,6 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/api/s2s/user"
"github.com/superseriousbusiness/gotosocial/internal/api/s2s/webfinger"
"github.com/superseriousbusiness/gotosocial/internal/api/security"
- "github.com/superseriousbusiness/gotosocial/internal/cliactions"
"github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db/bundb"
"github.com/superseriousbusiness/gotosocial/internal/email"
@@ -53,8 +72,8 @@ import (
)
// Start creates and starts a gotosocial server
-var Start cliactions.GTSAction = func(ctx context.Context, c *config.Config) error {
- dbService, err := bundb.NewBunDBService(ctx, c)
+var Start action.GTSAction = func(ctx context.Context) error {
+ dbService, err := bundb.NewBunDBService(ctx)
if err != nil {
return fmt.Errorf("error creating dbservice: %s", err)
}
@@ -67,81 +86,83 @@ var Start cliactions.GTSAction = func(ctx context.Context, c *config.Config) err
return fmt.Errorf("error creating instance instance: %s", err)
}
- federatingDB := federatingdb.New(dbService, c)
+ federatingDB := federatingdb.New(dbService)
- router, err := router.New(ctx, c, dbService)
+ router, err := router.New(ctx, dbService)
if err != nil {
return fmt.Errorf("error creating router: %s", err)
}
// build converters and util
- typeConverter := typeutils.NewConverter(c, dbService)
- timelineManager := timelineprocessing.NewManager(dbService, typeConverter, c)
+ typeConverter := typeutils.NewConverter(dbService)
+ timelineManager := timelineprocessing.NewManager(dbService, typeConverter)
// Open the storage backend
- storage, err := kv.OpenFile(c.StorageConfig.BasePath, nil)
+ storageBasePath := viper.GetString(config.Keys.StorageBasePath)
+ storage, err := kv.OpenFile(storageBasePath, nil)
if err != nil {
return fmt.Errorf("error creating storage backend: %s", err)
}
// build backend handlers
- mediaHandler := media.New(c, dbService, storage)
+ mediaHandler := media.New(dbService, storage)
oauthServer := oauth.New(ctx, dbService)
- transportController := transport.NewController(c, dbService, &federation.Clock{}, http.DefaultClient)
- federator := federation.NewFederator(dbService, federatingDB, transportController, c, typeConverter, mediaHandler)
+ transportController := transport.NewController(dbService, &federation.Clock{}, http.DefaultClient)
+ federator := federation.NewFederator(dbService, federatingDB, transportController, typeConverter, mediaHandler)
// decide whether to create a noop email sender (won't send emails) or a real one
var emailSender email.Sender
- if c.SMTPConfig.Host != "" {
+ smtpHost := viper.GetString(config.Keys.SMTPHost)
+ if smtpHost != "" {
// host is defined so create a proper sender
- emailSender, err = email.NewSender(c)
+ emailSender, err = email.NewSender()
if err != nil {
return fmt.Errorf("error creating email sender: %s", err)
}
} else {
// no host is defined so create a noop sender
- emailSender, err = email.NewNoopSender(c.TemplateConfig.BaseDir, nil)
+ emailSender, err = email.NewNoopSender(nil)
if err != nil {
return fmt.Errorf("error creating noop email sender: %s", err)
}
}
// create and start the message processor using the other services we've created so far
- processor := processing.NewProcessor(c, typeConverter, federator, oauthServer, mediaHandler, storage, timelineManager, dbService, emailSender)
+ processor := processing.NewProcessor(typeConverter, federator, oauthServer, mediaHandler, storage, timelineManager, dbService, emailSender)
if err := processor.Start(ctx); err != nil {
return fmt.Errorf("error starting processor: %s", err)
}
- idp, err := oidc.NewIDP(ctx, c)
+ idp, err := oidc.NewIDP(ctx)
if err != nil {
return fmt.Errorf("error creating oidc idp: %s", err)
}
// build client api modules
- authModule := auth.New(c, dbService, oauthServer, idp)
- accountModule := account.New(c, processor)
- instanceModule := instance.New(c, processor)
- appsModule := app.New(c, processor)
- followRequestsModule := followrequest.New(c, processor)
- webfingerModule := webfinger.New(c, processor)
- nodeInfoModule := nodeinfo.New(c, processor)
- webBaseModule := web.New(c, processor)
- usersModule := user.New(c, processor)
- timelineModule := timeline.New(c, processor)
- notificationModule := notification.New(c, processor)
- searchModule := search.New(c, processor)
- filtersModule := filter.New(c, processor)
- emojiModule := emoji.New(c, processor)
- listsModule := list.New(c, processor)
- mm := mediaModule.New(c, processor)
- fileServerModule := fileserver.New(c, processor)
- adminModule := admin.New(c, processor)
- statusModule := status.New(c, processor)
- securityModule := security.New(c, dbService, oauthServer)
- streamingModule := streaming.New(c, processor)
- favouritesModule := favourites.New(c, processor)
- blocksModule := blocks.New(c, processor)
- userClientModule := userClient.New(c, processor)
+ authModule := auth.New(dbService, oauthServer, idp)
+ accountModule := account.New(processor)
+ instanceModule := instance.New(processor)
+ appsModule := app.New(processor)
+ followRequestsModule := followrequest.New(processor)
+ webfingerModule := webfinger.New(processor)
+ nodeInfoModule := nodeinfo.New(processor)
+ webBaseModule := web.New(processor)
+ usersModule := user.New(processor)
+ timelineModule := timeline.New(processor)
+ notificationModule := notification.New(processor)
+ searchModule := search.New(processor)
+ filtersModule := filter.New(processor)
+ emojiModule := emoji.New(processor)
+ listsModule := list.New(processor)
+ mm := mediaModule.New(processor)
+ fileServerModule := fileserver.New(processor)
+ adminModule := admin.New(processor)
+ statusModule := status.New(processor)
+ securityModule := security.New(dbService, oauthServer)
+ streamingModule := streaming.New(processor)
+ favouritesModule := favourites.New(processor)
+ blocksModule := blocks.New(processor)
+ userClientModule := userClient.New(processor)
apis := []api.ClientModule{
// modules with middleware go first
@@ -179,7 +200,7 @@ var Start cliactions.GTSAction = func(ctx context.Context, c *config.Config) err
}
}
- gts, err := gotosocial.NewServer(dbService, router, federator, c)
+ gts, err := gotosocial.NewServer(dbService, router, federator)
if err != nil {
return fmt.Errorf("error creating gotosocial service: %s", err)
}
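The server hunk shows constructors dropping their *config.Config parameter and reading configuration from the global viper store at the point of use instead. A reduced sketch of that constructor style, with an assumed key name standing in for config.Keys.MediaImageMaxSize:
// Sketch only: newMediaHandler and the "media-image-max-size" key are
// illustrative; the real constructor is media.New and the real key lives
// in config.Keys.
package main

import (
	"fmt"

	"github.com/spf13/viper"
)

type mediaHandler struct {
	maxImageSize int
}

// newMediaHandler reads its configuration from the viper store at
// construction time instead of receiving a *config.Config parameter.
func newMediaHandler() *mediaHandler {
	return &mediaHandler{
		maxImageSize: viper.GetInt("media-image-max-size"),
	}
}

func main() {
	viper.Set("media-image-max-size", 2097152)
	fmt.Println(newMediaHandler().maxImageSize)
}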
diff --git a/internal/cliactions/testrig/testrig.go b/cmd/gotosocial/action/testrig/testrig.go
similarity index 69%
rename from internal/cliactions/testrig/testrig.go
rename to cmd/gotosocial/action/testrig/testrig.go
index 5e55c3d03..626a22781 100644
--- a/internal/cliactions/testrig/testrig.go
+++ b/cmd/gotosocial/action/testrig/testrig.go
@@ -1,3 +1,21 @@
+/*
+ GoToSocial
+ Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
package testrig
import (
@@ -11,6 +29,7 @@ import (
"syscall"
"github.com/sirupsen/logrus"
+ "github.com/superseriousbusiness/gotosocial/cmd/gotosocial/action"
"github.com/superseriousbusiness/gotosocial/internal/api"
"github.com/superseriousbusiness/gotosocial/internal/api/client/account"
"github.com/superseriousbusiness/gotosocial/internal/api/client/admin"
@@ -35,8 +54,6 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/api/s2s/user"
"github.com/superseriousbusiness/gotosocial/internal/api/s2s/webfinger"
"github.com/superseriousbusiness/gotosocial/internal/api/security"
- "github.com/superseriousbusiness/gotosocial/internal/cliactions"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/gotosocial"
"github.com/superseriousbusiness/gotosocial/internal/oidc"
"github.com/superseriousbusiness/gotosocial/internal/web"
@@ -44,10 +61,10 @@ import (
)
// Start creates and starts a gotosocial testrig server
-var Start cliactions.GTSAction = func(ctx context.Context, _ *config.Config) error {
+var Start action.GTSAction = func(ctx context.Context) error {
+ testrig.InitTestConfig()
testrig.InitTestLog()
- c := testrig.NewTestConfig()
dbService := testrig.NewTestDB()
testrig.StandardDBSetup(dbService, nil)
router := testrig.NewTestRouter(dbService)
@@ -72,36 +89,36 @@ var Start cliactions.GTSAction = func(ctx context.Context, _ *config.Config) err
return fmt.Errorf("error starting processor: %s", err)
}
- idp, err := oidc.NewIDP(ctx, c)
+ idp, err := oidc.NewIDP(ctx)
if err != nil {
return fmt.Errorf("error creating oidc idp: %s", err)
}
// build client api modules
- authModule := auth.New(c, dbService, oauthServer, idp)
- accountModule := account.New(c, processor)
- instanceModule := instance.New(c, processor)
- appsModule := app.New(c, processor)
- followRequestsModule := followrequest.New(c, processor)
- webfingerModule := webfinger.New(c, processor)
- nodeInfoModule := nodeinfo.New(c, processor)
- webBaseModule := web.New(c, processor)
- usersModule := user.New(c, processor)
- timelineModule := timeline.New(c, processor)
- notificationModule := notification.New(c, processor)
- searchModule := search.New(c, processor)
- filtersModule := filter.New(c, processor)
- emojiModule := emoji.New(c, processor)
- listsModule := list.New(c, processor)
- mm := mediaModule.New(c, processor)
- fileServerModule := fileserver.New(c, processor)
- adminModule := admin.New(c, processor)
- statusModule := status.New(c, processor)
- securityModule := security.New(c, dbService, oauthServer)
- streamingModule := streaming.New(c, processor)
- favouritesModule := favourites.New(c, processor)
- blocksModule := blocks.New(c, processor)
- userClientModule := userClient.New(c, processor)
+ authModule := auth.New(dbService, oauthServer, idp)
+ accountModule := account.New(processor)
+ instanceModule := instance.New(processor)
+ appsModule := app.New(processor)
+ followRequestsModule := followrequest.New(processor)
+ webfingerModule := webfinger.New(processor)
+ nodeInfoModule := nodeinfo.New(processor)
+ webBaseModule := web.New(processor)
+ usersModule := user.New(processor)
+ timelineModule := timeline.New(processor)
+ notificationModule := notification.New(processor)
+ searchModule := search.New(processor)
+ filtersModule := filter.New(processor)
+ emojiModule := emoji.New(processor)
+ listsModule := list.New(processor)
+ mm := mediaModule.New(processor)
+ fileServerModule := fileserver.New(processor)
+ adminModule := admin.New(processor)
+ statusModule := status.New(processor)
+ securityModule := security.New(dbService, oauthServer)
+ streamingModule := streaming.New(processor)
+ favouritesModule := favourites.New(processor)
+ blocksModule := blocks.New(processor)
+ userClientModule := userClient.New(processor)
apis := []api.ClientModule{
// modules with middleware go first
@@ -139,7 +156,7 @@ var Start cliactions.GTSAction = func(ctx context.Context, _ *config.Config) err
}
}
- gts, err := gotosocial.NewServer(dbService, router, federator, c)
+ gts, err := gotosocial.NewServer(dbService, router, federator)
if err != nil {
return fmt.Errorf("error creating gotosocial service: %s", err)
}
diff --git a/cmd/gotosocial/admin.go b/cmd/gotosocial/admin.go
new file mode 100644
index 000000000..9ee4461ea
--- /dev/null
+++ b/cmd/gotosocial/admin.go
@@ -0,0 +1,169 @@
+/*
+ GoToSocial
+ Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package main
+
+import (
+ "github.com/spf13/cobra"
+ "github.com/superseriousbusiness/gotosocial/cmd/gotosocial/action/admin/account"
+ "github.com/superseriousbusiness/gotosocial/cmd/gotosocial/action/admin/trans"
+ "github.com/superseriousbusiness/gotosocial/cmd/gotosocial/flag"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
+)
+
+func adminCommands() *cobra.Command {
+ adminCmd := &cobra.Command{
+ Use: "admin",
+ Short: "gotosocial admin-related tasks",
+ }
+
+ /*
+ ADMIN ACCOUNT COMMANDS
+ */
+
+ adminAccountCmd := &cobra.Command{
+ Use: "account",
+ Short: "admin commands related to accounts",
+ }
+ flag.AdminAccount(adminAccountCmd, config.Defaults)
+
+ adminAccountCreateCmd := &cobra.Command{
+ Use: "create",
+ Short: "create a new account",
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ return preRun(cmd)
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return run(cmd.Context(), account.Create)
+ },
+ }
+ flag.AdminAccountCreate(adminAccountCreateCmd, config.Defaults)
+ adminAccountCmd.AddCommand(adminAccountCreateCmd)
+
+ adminAccountConfirmCmd := &cobra.Command{
+ Use: "confirm",
+ Short: "confirm an existing account manually, thereby skipping email confirmation",
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ return preRun(cmd)
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return run(cmd.Context(), account.Confirm)
+ },
+ }
+ flag.AdminAccount(adminAccountConfirmCmd, config.Defaults)
+ adminAccountCmd.AddCommand(adminAccountConfirmCmd)
+
+ adminAccountPromoteCmd := &cobra.Command{
+ Use: "promote",
+ Short: "promote an account to admin",
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ return preRun(cmd)
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return run(cmd.Context(), account.Promote)
+ },
+ }
+ flag.AdminAccount(adminAccountPromoteCmd, config.Defaults)
+ adminAccountCmd.AddCommand(adminAccountPromoteCmd)
+
+ adminAccountDemoteCmd := &cobra.Command{
+ Use: "demote",
+ Short: "demote an account from admin to normal user",
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ return preRun(cmd)
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return run(cmd.Context(), account.Demote)
+ },
+ }
+ flag.AdminAccount(adminAccountDemoteCmd, config.Defaults)
+ adminAccountCmd.AddCommand(adminAccountDemoteCmd)
+
+ adminAccountDisableCmd := &cobra.Command{
+ Use: "disable",
+ Short: "prevent an account from signing in or posting etc, but don't delete anything",
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ return preRun(cmd)
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return run(cmd.Context(), account.Disable)
+ },
+ }
+ flag.AdminAccount(adminAccountDisableCmd, config.Defaults)
+ adminAccountCmd.AddCommand(adminAccountDisableCmd)
+
+ adminAccountSuspendCmd := &cobra.Command{
+ Use: "suspend",
+ Short: "completely remove an account and all of its posts, media, etc",
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ return preRun(cmd)
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return run(cmd.Context(), account.Suspend)
+ },
+ }
+ flag.AdminAccount(adminAccountSuspendCmd, config.Defaults)
+ adminAccountCmd.AddCommand(adminAccountSuspendCmd)
+
+ adminAccountPasswordCmd := &cobra.Command{
+ Use: "password",
+ Short: "set a new password for the given account",
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ return preRun(cmd)
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return run(cmd.Context(), account.Password)
+ },
+ }
+ flag.AdminAccountPassword(adminAccountPasswordCmd, config.Defaults)
+ adminAccountCmd.AddCommand(adminAccountPasswordCmd)
+
+ adminCmd.AddCommand(adminAccountCmd)
+
+ /*
+ ADMIN IMPORT/EXPORT COMMANDS
+ */
+
+ adminExportCmd := &cobra.Command{
+ Use: "export",
+ Short: "export data from the database to file at the given path",
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ return preRun(cmd)
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return run(cmd.Context(), trans.Export)
+ },
+ }
+ flag.AdminTrans(adminExportCmd, config.Defaults)
+ adminCmd.AddCommand(adminExportCmd)
+
+ adminImportCmd := &cobra.Command{
+ Use: "import",
+ Short: "import data from a file into the database",
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ return preRun(cmd)
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return run(cmd.Context(), trans.Import)
+ },
+ }
+ flag.AdminTrans(adminImportCmd, config.Defaults)
+ adminCmd.AddCommand(adminImportCmd)
+
+ return adminCmd
+}
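adminCommands builds a parent cobra command and hangs subcommands off it, each deferring to preRun/run via its PreRunE/RunE hooks. A trimmed sketch of that wiring with illustrative command and flag names:
// Sketch only: command, flag and output names here are illustrative.
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	rootCmd := &cobra.Command{Use: "example"}

	adminCmd := &cobra.Command{
		Use:   "admin",
		Short: "admin-related tasks",
	}

	createCmd := &cobra.Command{
		Use:   "create",
		Short: "create a new thing",
		PreRunE: func(cmd *cobra.Command, args []string) error {
			// config/viper initialization would happen here, as in preRun
			return nil
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			username, err := cmd.Flags().GetString("username")
			if err != nil {
				return err
			}
			fmt.Println("creating:", username)
			return nil
		},
	}
	createCmd.Flags().String("username", "", "username to create")

	adminCmd.AddCommand(createCmd)
	rootCmd.AddCommand(adminCmd)

	rootCmd.SetArgs([]string{"admin", "create", "--username", "someone"})
	if err := rootCmd.Execute(); err != nil {
		fmt.Println(err)
	}
}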
diff --git a/cmd/gotosocial/admincommands.go b/cmd/gotosocial/admincommands.go
deleted file mode 100644
index a70693b2c..000000000
--- a/cmd/gotosocial/admincommands.go
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- GoToSocial
- Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-package main
-
-import (
- "github.com/superseriousbusiness/gotosocial/internal/cliactions/admin/account"
- "github.com/superseriousbusiness/gotosocial/internal/cliactions/admin/trans"
- "github.com/superseriousbusiness/gotosocial/internal/config"
- "github.com/urfave/cli/v2"
-)
-
-func adminCommands(allFlags []cli.Flag) []*cli.Command {
- return []*cli.Command{
- {
- Name: "admin",
- Usage: "gotosocial admin-related tasks",
- Subcommands: []*cli.Command{
- {
- Name: "account",
- Usage: "admin commands related to accounts",
- Subcommands: []*cli.Command{
- {
- Name: "create",
- Usage: "create a new account",
- Flags: []cli.Flag{
- &cli.StringFlag{
- Name: config.UsernameFlag,
- Usage: config.UsernameUsage,
- Required: true,
- },
- &cli.StringFlag{
- Name: config.EmailFlag,
- Usage: config.EmailUsage,
- Required: true,
- },
- &cli.StringFlag{
- Name: config.PasswordFlag,
- Usage: config.PasswordUsage,
- Required: true,
- },
- },
- Action: func(c *cli.Context) error {
- return runAction(c, allFlags, account.Create)
- },
- },
- {
- Name: "confirm",
- Usage: "confirm an existing account manually, thereby skipping email confirmation",
- Flags: []cli.Flag{
- &cli.StringFlag{
- Name: config.UsernameFlag,
- Usage: config.UsernameUsage,
- Required: true,
- },
- },
- Action: func(c *cli.Context) error {
- return runAction(c, allFlags, account.Confirm)
- },
- },
- {
- Name: "promote",
- Usage: "promote an account to admin",
- Flags: []cli.Flag{
- &cli.StringFlag{
- Name: config.UsernameFlag,
- Usage: config.UsernameUsage,
- Required: true,
- },
- },
- Action: func(c *cli.Context) error {
- return runAction(c, allFlags, account.Promote)
- },
- },
- {
- Name: "demote",
- Usage: "demote an account from admin to normal user",
- Flags: []cli.Flag{
- &cli.StringFlag{
- Name: config.UsernameFlag,
- Usage: config.UsernameUsage,
- Required: true,
- },
- },
- Action: func(c *cli.Context) error {
- return runAction(c, allFlags, account.Demote)
- },
- },
- {
- Name: "disable",
- Usage: "prevent an account from signing in or posting etc, but don't delete anything",
- Flags: []cli.Flag{
- &cli.StringFlag{
- Name: config.UsernameFlag,
- Usage: config.UsernameUsage,
- Required: true,
- },
- },
- Action: func(c *cli.Context) error {
- return runAction(c, allFlags, account.Disable)
- },
- },
- {
- Name: "suspend",
- Usage: "completely remove an account and all of its posts, media, etc",
- Flags: []cli.Flag{
- &cli.StringFlag{
- Name: config.UsernameFlag,
- Usage: config.UsernameUsage,
- Required: true,
- },
- },
- Action: func(c *cli.Context) error {
- return runAction(c, allFlags, account.Suspend)
- },
- },
- {
- Name: "password",
- Usage: "set a new password for the given account",
- Flags: []cli.Flag{
- &cli.StringFlag{
- Name: config.UsernameFlag,
- Usage: config.UsernameUsage,
- Required: true,
- },
- &cli.StringFlag{
- Name: config.PasswordFlag,
- Usage: config.PasswordUsage,
- Required: true,
- },
- },
- Action: func(c *cli.Context) error {
- return runAction(c, allFlags, account.Password)
- },
- },
- },
- },
- {
- Name: "export",
- Usage: "export data from the database to file at the given path",
- Flags: []cli.Flag{
- &cli.StringFlag{
- Name: config.TransPathFlag,
- Usage: config.TransPathUsage,
- Required: true,
- },
- },
- Action: func(c *cli.Context) error {
- return runAction(c, allFlags, trans.Export)
- },
- },
- {
- Name: "import",
- Usage: "import data from a file into the database",
- Flags: []cli.Flag{
- &cli.StringFlag{
- Name: config.TransPathFlag,
- Usage: config.TransPathUsage,
- Required: true,
- },
- },
- Action: func(c *cli.Context) error {
- return runAction(c, allFlags, trans.Import)
- },
- },
- },
- },
- }
-}
diff --git a/cmd/gotosocial/common.go b/cmd/gotosocial/common.go
new file mode 100644
index 000000000..7b5a42652
--- /dev/null
+++ b/cmd/gotosocial/common.go
@@ -0,0 +1,64 @@
+/*
+ GoToSocial
+ Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package main
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+ "github.com/superseriousbusiness/gotosocial/cmd/gotosocial/action"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
+ "github.com/superseriousbusiness/gotosocial/internal/log"
+)
+
+// preRun should be run in the pre-run stage of every cobra command.
+// The goal here is to initialize the viper config store, and also read in
+// the config file (if present).
+//
+// The order of these is important: the init-config function reads the location
+// of the config file from the viper store so that it can be picked up by either
+// env vars or cli flag.
+func preRun(cmd *cobra.Command) error {
+ if err := config.InitViper(cmd.Flags()); err != nil {
+ return fmt.Errorf("error initializing viper: %s", err)
+ }
+
+ if err := config.ReadFromFile(); err != nil {
+ return fmt.Errorf("error initializing config: %s", err)
+ }
+
+ return nil
+}
+
+// run should be used during the run stage of every cobra command.
+// The idea here is to take a GTSAction and run it with the given
+// context, after initializing any last-minute things like loggers etc.
+func run(ctx context.Context, action action.GTSAction) error {
+ // if log level has been set...
+ if logLevel := viper.GetString(config.Keys.LogLevel); logLevel != "" {
+ // then try to initialize the logger to that level
+ if err := log.Initialize(logLevel); err != nil {
+ return fmt.Errorf("error initializing log: %s", err)
+ }
+ }
+
+ return action(ctx)
+}
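config.InitViper and config.ReadFromFile are not part of this hunk; a hedged sketch of the kind of work they are expected to do, namely binding cobra flags and environment variables into viper and then reading an optional config file, using assumed prefix and key names:
// Sketch only: the "gts" env prefix and "config-path" key are assumptions;
// the real logic lives in the config package, which this file only calls.
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/pflag"
	"github.com/spf13/viper"
)

func initViper(flags *pflag.FlagSet) error {
	viper.SetEnvPrefix("gts")
	viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
	viper.AutomaticEnv()

	// every flag becomes a viper key under its flag name
	return viper.BindPFlags(flags)
}

func readFromFile() error {
	if path := viper.GetString("config-path"); path != "" {
		viper.SetConfigFile(path)
		return viper.ReadInConfig()
	}
	return nil
}

func main() {
	flags := pflag.NewFlagSet("example", pflag.ContinueOnError)
	flags.String("config-path", "", "path to a config file")
	if err := flags.Parse([]string{}); err != nil {
		fmt.Println(err)
		return
	}

	if err := initViper(flags); err != nil {
		fmt.Println(err)
		return
	}
	if err := readFromFile(); err != nil {
		fmt.Println(err)
	}
}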
diff --git a/cmd/gotosocial/databaseflags.go b/cmd/gotosocial/databaseflags.go
deleted file mode 100644
index 7abe64f1b..000000000
--- a/cmd/gotosocial/databaseflags.go
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- GoToSocial
- Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-package main
-
-import (
- "github.com/superseriousbusiness/gotosocial/internal/config"
- "github.com/urfave/cli/v2"
-)
-
-func databaseFlags(flagNames, envNames config.Flags, defaults config.Defaults) []cli.Flag {
- return []cli.Flag{
- &cli.StringFlag{
- Name: flagNames.DbType,
- Usage: "Database type: eg., postgres",
- Value: defaults.DbType,
- EnvVars: []string{envNames.DbType},
- },
- &cli.StringFlag{
- Name: flagNames.DbAddress,
- Usage: "Database ipv4 address or hostname",
- Value: defaults.DbAddress,
- EnvVars: []string{envNames.DbAddress},
- },
- &cli.IntFlag{
- Name: flagNames.DbPort,
- Usage: "Database port",
- Value: defaults.DbPort,
- EnvVars: []string{envNames.DbPort},
- },
- &cli.StringFlag{
- Name: flagNames.DbUser,
- Usage: "Database username",
- Value: defaults.DbUser,
- EnvVars: []string{envNames.DbUser},
- },
- &cli.StringFlag{
- Name: flagNames.DbPassword,
- Usage: "Database password",
- Value: defaults.DbPassword,
- EnvVars: []string{envNames.DbPassword},
- },
- &cli.StringFlag{
- Name: flagNames.DbDatabase,
- Usage: "Database name",
- Value: defaults.DbDatabase,
- EnvVars: []string{envNames.DbDatabase},
- },
- &cli.StringFlag{
- Name: flagNames.DbTLSMode,
- Usage: "Database tls mode",
- Value: defaults.DBTlsMode,
- EnvVars: []string{envNames.DbTLSMode},
- },
- &cli.StringFlag{
- Name: flagNames.DbTLSCACert,
- Usage: "Path to CA cert for db tls connection",
- Value: defaults.DBTlsCACert,
- EnvVars: []string{envNames.DbTLSCACert},
- },
- }
-}
diff --git a/cmd/gotosocial/flags.go b/cmd/gotosocial/debug.go
similarity index 52%
rename from cmd/gotosocial/flags.go
rename to cmd/gotosocial/debug.go
index 8e162f6c7..f66d5ac0c 100644
--- a/cmd/gotosocial/flags.go
+++ b/cmd/gotosocial/debug.go
@@ -19,31 +19,30 @@
package main
import (
+ "github.com/spf13/cobra"
+ configaction "github.com/superseriousbusiness/gotosocial/cmd/gotosocial/action/debug/config"
+ "github.com/superseriousbusiness/gotosocial/cmd/gotosocial/flag"
"github.com/superseriousbusiness/gotosocial/internal/config"
- "github.com/urfave/cli/v2"
)
-func getFlags() []cli.Flag {
- flagNames := config.GetFlagNames()
- envNames := config.GetEnvNames()
- defaults := config.GetDefaults()
-
- flags := []cli.Flag{}
- flagSets := [][]cli.Flag{
- generalFlags(flagNames, envNames, defaults),
- databaseFlags(flagNames, envNames, defaults),
- templateFlags(flagNames, envNames, defaults),
- accountsFlags(flagNames, envNames, defaults),
- mediaFlags(flagNames, envNames, defaults),
- storageFlags(flagNames, envNames, defaults),
- statusesFlags(flagNames, envNames, defaults),
- letsEncryptFlags(flagNames, envNames, defaults),
- oidcFlags(flagNames, envNames, defaults),
- smtpFlags(flagNames, envNames, defaults),
- }
- for _, fs := range flagSets {
- flags = append(flags, fs...)
+func debugCommands() *cobra.Command {
+ debugCmd := &cobra.Command{
+ Use: "debug",
+ Short: "gotosocial debug-related tasks",
}
- return flags
+ debugConfigCmd := &cobra.Command{
+ Use: "config",
+ Short: "print the collated config (derived from env, flag, and config file) to stdout",
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ return preRun(cmd)
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return run(cmd.Context(), configaction.Config)
+ },
+ }
+ flag.Server(debugConfigCmd, config.Defaults)
+
+ debugCmd.AddCommand(debugConfigCmd)
+ return debugCmd
}
diff --git a/cmd/gotosocial/flag/admin.go b/cmd/gotosocial/flag/admin.go
new file mode 100644
index 000000000..b087071bb
--- /dev/null
+++ b/cmd/gotosocial/flag/admin.go
@@ -0,0 +1,62 @@
+/*
+ GoToSocial
+ Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package flag
+
+import (
+ "github.com/spf13/cobra"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
+)
+
+// AdminAccount attaches flags pertaining to admin account actions.
+func AdminAccount(cmd *cobra.Command, values config.Values) {
+ cmd.Flags().String(config.Keys.AdminAccountUsername, "", usage.AdminAccountUsername) // REQUIRED
+ if err := cmd.MarkFlagRequired(config.Keys.AdminAccountUsername); err != nil {
+ panic(err)
+ }
+}
+
+// AdminAccountPassword attaches flags pertaining to admin account password reset.
+func AdminAccountPassword(cmd *cobra.Command, values config.Values) {
+ AdminAccount(cmd, values)
+ cmd.Flags().String(config.Keys.AdminAccountPassword, "", usage.AdminAccountPassword) // REQUIRED
+ if err := cmd.MarkFlagRequired(config.Keys.AdminAccountPassword); err != nil {
+ panic(err)
+ }
+}
+
+// AdminAccountCreate attaches flags pertaining to admin account creation.
+func AdminAccountCreate(cmd *cobra.Command, values config.Values) {
+ AdminAccount(cmd, values)
+ cmd.Flags().String(config.Keys.AdminAccountPassword, "", usage.AdminAccountPassword) // REQUIRED
+ if err := cmd.MarkFlagRequired(config.Keys.AdminAccountPassword); err != nil {
+ panic(err)
+ }
+ cmd.Flags().String(config.Keys.AdminAccountEmail, "", usage.AdminAccountEmail) // REQUIRED
+ if err := cmd.MarkFlagRequired(config.Keys.AdminAccountEmail); err != nil {
+ panic(err)
+ }
+}
+
+// AdminTrans attaches flags pertaining to import/export commands.
+func AdminTrans(cmd *cobra.Command, values config.Values) {
+ cmd.Flags().String(config.Keys.AdminTransPath, "", usage.AdminTransPath) // REQUIRED
+ if err := cmd.MarkFlagRequired(config.Keys.AdminTransPath); err != nil {
+ panic(err)
+ }
+}
diff --git a/cmd/gotosocial/flag/global.go b/cmd/gotosocial/flag/global.go
new file mode 100644
index 000000000..b7fb03e17
--- /dev/null
+++ b/cmd/gotosocial/flag/global.go
@@ -0,0 +1,45 @@
+/*
+ GoToSocial
+ Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package flag
+
+import (
+ "github.com/spf13/cobra"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
+)
+
+// Global attaches flags that are common to all commands, aka persistent commands.
+func Global(cmd *cobra.Command, values config.Values) {
+ // general stuff
+ cmd.PersistentFlags().String(config.Keys.ApplicationName, values.ApplicationName, usage.ApplicationName)
+ cmd.PersistentFlags().String(config.Keys.Host, values.Host, usage.Host)
+ cmd.PersistentFlags().String(config.Keys.AccountDomain, values.AccountDomain, usage.AccountDomain)
+ cmd.PersistentFlags().String(config.Keys.Protocol, values.Protocol, usage.Protocol)
+ cmd.PersistentFlags().String(config.Keys.LogLevel, values.LogLevel, usage.LogLevel)
+ cmd.PersistentFlags().String(config.Keys.ConfigPath, values.ConfigPath, usage.ConfigPath)
+
+ // database stuff
+ cmd.PersistentFlags().String(config.Keys.DbType, values.DbType, usage.DbType)
+ cmd.PersistentFlags().String(config.Keys.DbAddress, values.DbAddress, usage.DbAddress)
+ cmd.PersistentFlags().Int(config.Keys.DbPort, values.DbPort, usage.DbPort)
+ cmd.PersistentFlags().String(config.Keys.DbUser, values.DbUser, usage.DbUser)
+ cmd.PersistentFlags().String(config.Keys.DbPassword, values.DbPassword, usage.DbPassword)
+ cmd.PersistentFlags().String(config.Keys.DbDatabase, values.DbDatabase, usage.DbDatabase)
+ cmd.PersistentFlags().String(config.Keys.DbTLSMode, values.DbTLSMode, usage.DbTLSMode)
+ cmd.PersistentFlags().String(config.Keys.DbTLSCACert, values.DbTLSCACert, usage.DbTLSCACert)
+}
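These global flags are registered with PersistentFlags() so that every subcommand inherits them. A tiny sketch demonstrating that inheritance with an illustrative flag name:
// Sketch only: "log-level" is an illustrative flag name.
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	rootCmd := &cobra.Command{Use: "example"}
	// registered on the root with PersistentFlags, so subcommands accept it too
	rootCmd.PersistentFlags().String("log-level", "info", "log level to run at")

	childCmd := &cobra.Command{
		Use: "child",
		RunE: func(cmd *cobra.Command, args []string) error {
			level, err := cmd.Flags().GetString("log-level")
			if err != nil {
				return err
			}
			fmt.Println("log level:", level)
			return nil
		},
	}
	rootCmd.AddCommand(childCmd)

	rootCmd.SetArgs([]string{"child", "--log-level", "debug"})
	if err := rootCmd.Execute(); err != nil {
		fmt.Println(err)
	}
}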
diff --git a/cmd/gotosocial/flag/server.go b/cmd/gotosocial/flag/server.go
new file mode 100644
index 000000000..3d3e946f3
--- /dev/null
+++ b/cmd/gotosocial/flag/server.go
@@ -0,0 +1,111 @@
+/*
+ GoToSocial
+ Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package flag
+
+import (
+ "github.com/spf13/cobra"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
+)
+
+// Server attaches all flags pertaining to running the GtS server or testrig.
+func Server(cmd *cobra.Command, values config.Values) {
+ Template(cmd, values)
+ Accounts(cmd, values)
+ Media(cmd, values)
+ Storage(cmd, values)
+ Statuses(cmd, values)
+ LetsEncrypt(cmd, values)
+ OIDC(cmd, values)
+ SMTP(cmd, values)
+ Router(cmd, values)
+}
+
+// Router attaches flags pertaining to the gin router.
+func Router(cmd *cobra.Command, values config.Values) {
+ cmd.PersistentFlags().String(config.Keys.BindAddress, values.BindAddress, usage.BindAddress)
+ cmd.PersistentFlags().Int(config.Keys.Port, values.Port, usage.Port)
+ cmd.PersistentFlags().StringSlice(config.Keys.TrustedProxies, values.TrustedProxies, usage.TrustedProxies)
+}
+
+// Template attaches flags pertaining to templating config.
+func Template(cmd *cobra.Command, values config.Values) {
+ cmd.Flags().String(config.Keys.WebTemplateBaseDir, values.WebTemplateBaseDir, usage.WebTemplateBaseDir)
+ cmd.Flags().String(config.Keys.WebAssetBaseDir, values.WebAssetBaseDir, usage.WebAssetBaseDir)
+}
+
+// Accounts attaches flags pertaining to account config.
+func Accounts(cmd *cobra.Command, values config.Values) {
+ cmd.Flags().Bool(config.Keys.AccountsRegistrationOpen, values.AccountsRegistrationOpen, usage.AccountsRegistrationOpen)
+ cmd.Flags().Bool(config.Keys.AccountsApprovalRequired, values.AccountsApprovalRequired, usage.AccountsApprovalRequired)
+ cmd.Flags().Bool(config.Keys.AccountsReasonRequired, values.AccountsReasonRequired, usage.AccountsReasonRequired)
+}
+
+// Media attaches flags pertaining to media config.
+func Media(cmd *cobra.Command, values config.Values) {
+ cmd.Flags().Int(config.Keys.MediaImageMaxSize, values.MediaImageMaxSize, usage.MediaImageMaxSize)
+ cmd.Flags().Int(config.Keys.MediaVideoMaxSize, values.MediaVideoMaxSize, usage.MediaVideoMaxSize)
+ cmd.Flags().Int(config.Keys.MediaDescriptionMinChars, values.MediaDescriptionMinChars, usage.MediaDescriptionMinChars)
+ cmd.Flags().Int(config.Keys.MediaDescriptionMaxChars, values.MediaDescriptionMaxChars, usage.MediaDescriptionMaxChars)
+}
+
+// Storage attaches flags pertaining to storage config.
+func Storage(cmd *cobra.Command, values config.Values) {
+ cmd.Flags().String(config.Keys.StorageBackend, values.StorageBackend, usage.StorageBackend)
+ cmd.Flags().String(config.Keys.StorageBasePath, values.StorageBasePath, usage.StorageBasePath)
+ cmd.Flags().String(config.Keys.StorageServeProtocol, values.StorageServeProtocol, usage.StorageServeProtocol)
+ cmd.Flags().String(config.Keys.StorageServeHost, values.StorageServeHost, usage.StorageServeHost)
+ cmd.Flags().String(config.Keys.StorageServeBasePath, values.StorageServeBasePath, usage.StorageServeBasePath)
+}
+
+// Statuses attaches flags pertaining to statuses config.
+func Statuses(cmd *cobra.Command, values config.Values) {
+ cmd.Flags().Int(config.Keys.StatusesMaxChars, values.StatusesMaxChars, usage.StatusesMaxChars)
+ cmd.Flags().Int(config.Keys.StatusesCWMaxChars, values.StatusesCWMaxChars, usage.StatusesCWMaxChars)
+ cmd.Flags().Int(config.Keys.StatusesPollMaxOptions, values.StatusesPollMaxOptions, usage.StatusesPollMaxOptions)
+ cmd.Flags().Int(config.Keys.StatusesPollOptionMaxChars, values.StatusesPollOptionMaxChars, usage.StatusesPollOptionMaxChars)
+ cmd.Flags().Int(config.Keys.StatusesMediaMaxFiles, values.StatusesMediaMaxFiles, usage.StatusesMediaMaxFiles)
+}
+
+// LetsEncrypt attaches flags pertaining to letsencrypt config.
+func LetsEncrypt(cmd *cobra.Command, values config.Values) {
+ cmd.Flags().Bool(config.Keys.LetsEncryptEnabled, values.LetsEncryptEnabled, usage.LetsEncryptEnabled)
+ cmd.Flags().Int(config.Keys.LetsEncryptPort, values.LetsEncryptPort, usage.LetsEncryptPort)
+ cmd.Flags().String(config.Keys.LetsEncryptCertDir, values.LetsEncryptCertDir, usage.LetsEncryptCertDir)
+ cmd.Flags().String(config.Keys.LetsEncryptEmailAddress, values.LetsEncryptEmailAddress, usage.LetsEncryptEmailAddress)
+}
+
+// OIDC attaches flags pertaining to oidc config.
+func OIDC(cmd *cobra.Command, values config.Values) {
+ cmd.Flags().Bool(config.Keys.OIDCEnabled, values.OIDCEnabled, usage.OIDCEnabled)
+ cmd.Flags().String(config.Keys.OIDCIdpName, values.OIDCIdpName, usage.OIDCIdpName)
+ cmd.Flags().Bool(config.Keys.OIDCSkipVerification, values.OIDCSkipVerification, usage.OIDCSkipVerification)
+ cmd.Flags().String(config.Keys.OIDCIssuer, values.OIDCIssuer, usage.OIDCIssuer)
+ cmd.Flags().String(config.Keys.OIDCClientID, values.OIDCClientID, usage.OIDCClientID)
+ cmd.Flags().String(config.Keys.OIDCClientSecret, values.OIDCClientSecret, usage.OIDCClientSecret)
+ cmd.Flags().StringSlice(config.Keys.OIDCScopes, values.OIDCScopes, usage.OIDCScopes)
+}
+
+// SMTP attaches flags pertaining to smtp/email config.
+func SMTP(cmd *cobra.Command, values config.Values) {
+ cmd.Flags().String(config.Keys.SMTPHost, values.SMTPHost, usage.SMTPHost)
+ cmd.Flags().Int(config.Keys.SMTPPort, values.SMTPPort, usage.SMTPPort)
+ cmd.Flags().String(config.Keys.SMTPUsername, values.SMTPUsername, usage.SMTPUsername)
+ cmd.Flags().String(config.Keys.SMTPPassword, values.SMTPPassword, usage.SMTPPassword)
+ cmd.Flags().String(config.Keys.SMTPFrom, values.SMTPFrom, usage.SMTPFrom)
+}
diff --git a/cmd/gotosocial/flag/usage.go b/cmd/gotosocial/flag/usage.go
new file mode 100644
index 000000000..aa57048c1
--- /dev/null
+++ b/cmd/gotosocial/flag/usage.go
@@ -0,0 +1,80 @@
+/*
+ GoToSocial
+ Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package flag
+
+import "github.com/superseriousbusiness/gotosocial/internal/config"
+
+var usage = config.KeyNames{
+ LogLevel: "Log level to run at: [trace, debug, info, warn, fatal]",
+ ApplicationName: "Name of the application, used in various places internally",
+ ConfigPath: "Path to a file containing gotosocial configuration. Values set in this file will be overwritten by values set as env vars or arguments",
+ Host: "Hostname to use for the server (eg., example.org, gotosocial.whatever.com). DO NOT change this on a server that's already run!",
+ AccountDomain: "Domain to use in account names (eg., example.org, whatever.com). If not set, will default to the setting for host. DO NOT change this on a server that's already run!",
+ Protocol: "Protocol to use for the REST api of the server (only use http for debugging and tests!)",
+ BindAddress: "Bind address to use for the GoToSocial server (eg., 0.0.0.0, 172.138.0.9, [::], localhost). For ipv6, enclose the address in square brackets, eg [2001:db8::fed1]. Default binds to all interfaces.",
+ Port: "Port to use for GoToSocial. Change this to 443 if you're running the binary directly on the host machine.",
+ TrustedProxies: "Proxies to trust when parsing x-forwarded headers into real IPs.",
+ DbType: "Database type: eg., postgres",
+ DbAddress: "Database ipv4 address, hostname, or filename",
+ DbPort: "Database port",
+ DbUser: "Database username",
+ DbPassword: "Database password",
+ DbDatabase: "Database name",
+ DbTLSMode: "Database tls mode",
+ DbTLSCACert: "Path to CA cert for db tls connection",
+ WebTemplateBaseDir: "Basedir for html templating files for rendering pages and composing emails.",
+ WebAssetBaseDir: "Directory to serve static assets from, accessible at example.org/assets/",
+ AccountsRegistrationOpen: "Allow anyone to submit an account signup request. If false, server will be invite-only.",
+ AccountsApprovalRequired: "Do account signups require approval by an admin or moderator before user can log in? If false, new registrations will be automatically approved.",
+ AccountsReasonRequired: "Do new account signups require a reason to be submitted on registration?",
+ MediaImageMaxSize: "Max size of accepted images in bytes",
+ MediaVideoMaxSize: "Max size of accepted videos in bytes",
+ MediaDescriptionMinChars: "Min required chars for an image description",
+ MediaDescriptionMaxChars: "Max permitted chars for an image description",
+ StorageBackend: "Storage backend to use for media attachments",
+ StorageBasePath: "Full path to an already-created directory where gts should store/retrieve media files. Subfolders will be created within this dir.",
+ StorageServeProtocol: "Protocol to use for serving media attachments (use https if storage is local)",
+ StorageServeHost: "Hostname to serve media attachments from (use the same value as host if storage is local)",
+ StorageServeBasePath: "Path to append to protocol and hostname to create the base path from which media files will be served (default will mostly be fine)",
+ StatusesMaxChars: "Max permitted characters for posted statuses",
+ StatusesCWMaxChars: "Max permitted characters for content/spoiler warnings on statuses",
+ StatusesPollMaxOptions: "Max amount of options permitted on a poll",
+ StatusesPollOptionMaxChars: "Max amount of characters for a poll option",
+ StatusesMediaMaxFiles: "Maximum number of media files/attachments per status",
+ LetsEncryptEnabled: "Enable letsencrypt TLS certs for this server. If set to true, then cert dir also needs to be set (or take the default).",
+ LetsEncryptPort: "Port to listen on for letsencrypt certificate challenges. Must not be the same as the GtS webserver/API port.",
+ LetsEncryptCertDir: "Directory to store acquired letsencrypt certificates.",
+ LetsEncryptEmailAddress: "Email address to use when requesting letsencrypt certs. Will receive updates on cert expiry etc.",
+	OIDCEnabled:                "Enable OIDC authorization for this instance. If set to true, then the other OIDC flags must also be set.",
+ OIDCIdpName: "Name of the OIDC identity provider. Will be shown to the user when logging in.",
+ OIDCSkipVerification: "Skip verification of tokens returned by the OIDC provider. Should only be set to 'true' for testing purposes, never in a production environment!",
+ OIDCIssuer: "Address of the OIDC issuer. Should be the web address, including protocol, at which the issuer can be reached. Eg., 'https://example.org/auth'",
+ OIDCClientID: "ClientID of GoToSocial, as registered with the OIDC provider.",
+ OIDCClientSecret: "ClientSecret of GoToSocial, as registered with the OIDC provider.",
+ OIDCScopes: "OIDC scopes.",
+ SMTPHost: "Host of the smtp server. Eg., 'smtp.eu.mailgun.org'",
+ SMTPPort: "Port of the smtp server. Eg., 587",
+ SMTPUsername: "Username to authenticate with the smtp server as. Eg., 'postmaster@mail.example.org'",
+ SMTPPassword: "Password to pass to the smtp server.",
+ SMTPFrom: "Address to use as the 'from' field of the email. Eg., 'gotosocial@example.org'",
+ AdminAccountUsername: "the username to create/delete/etc",
+ AdminAccountEmail: "the email address of this account",
+ AdminAccountPassword: "the password to set for this account",
+ AdminTransPath: "the path of the file to import from/export to",
+}
diff --git a/cmd/gotosocial/generalflags.go b/cmd/gotosocial/generalflags.go
deleted file mode 100644
index b7e15ab20..000000000
--- a/cmd/gotosocial/generalflags.go
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- GoToSocial
- Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-package main
-
-import (
- "github.com/superseriousbusiness/gotosocial/internal/config"
- "github.com/urfave/cli/v2"
-)
-
-func generalFlags(flagNames, envNames config.Flags, defaults config.Defaults) []cli.Flag {
- return []cli.Flag{
- &cli.StringFlag{
- Name: flagNames.LogLevel,
- Usage: "Log level to run at: debug, info, warn, fatal",
- Value: defaults.LogLevel,
- EnvVars: []string{envNames.LogLevel},
- },
- &cli.StringFlag{
- Name: flagNames.ApplicationName,
- Usage: "Name of the application, used in various places internally",
- Value: defaults.ApplicationName,
- EnvVars: []string{envNames.ApplicationName},
- Hidden: true,
- },
- &cli.StringFlag{
- Name: flagNames.ConfigPath,
- Usage: "Path to a yaml file containing gotosocial configuration. Values set in this file will be overwritten by values set as env vars or arguments",
- Value: defaults.ConfigPath,
- EnvVars: []string{envNames.ConfigPath},
- },
- &cli.StringFlag{
- Name: flagNames.Host,
- Usage: "Hostname to use for the server (eg., example.org, gotosocial.whatever.com). DO NOT change this on a server that's already run!",
- Value: defaults.Host,
- EnvVars: []string{envNames.Host},
- },
- &cli.StringFlag{
- Name: flagNames.AccountDomain,
- Usage: "Domain to use in account names (eg., example.org, whatever.com). If not set, will default to the setting for host. DO NOT change this on a server that's already run!",
- Value: defaults.AccountDomain,
- EnvVars: []string{envNames.AccountDomain},
- },
- &cli.StringFlag{
- Name: flagNames.Protocol,
- Usage: "Protocol to use for the REST api of the server (only use http for debugging and tests!)",
- Value: defaults.Protocol,
- EnvVars: []string{envNames.Protocol},
- },
- &cli.StringFlag{
- Name: flagNames.BindAddress,
- Usage: "Bind address to use for the GoToSocial server (eg., 0.0.0.0, 172.138.0.9, [::], localhost). For ipv6, enclose the address in square brackets, eg [2001:db8::fed1]. Default binds to all interfaces.",
- Value: defaults.BindAddress,
- EnvVars: []string{envNames.BindAddress},
- },
- &cli.IntFlag{
- Name: flagNames.Port,
- Usage: "Port to use for GoToSocial. Change this to 443 if you're running the binary directly on the host machine.",
- Value: defaults.Port,
- EnvVars: []string{envNames.Port},
- },
- &cli.StringSliceFlag{
- Name: flagNames.TrustedProxies,
- Usage: "Proxies to trust when parsing x-forwarded headers into real IPs.",
- Value: cli.NewStringSlice(defaults.TrustedProxies...),
- EnvVars: []string{envNames.TrustedProxies},
- },
- }
-}
diff --git a/cmd/gotosocial/letsencryptflags.go b/cmd/gotosocial/letsencryptflags.go
deleted file mode 100644
index 86079c015..000000000
--- a/cmd/gotosocial/letsencryptflags.go
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- GoToSocial
- Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-package main
-
-import (
- "github.com/superseriousbusiness/gotosocial/internal/config"
- "github.com/urfave/cli/v2"
-)
-
-func letsEncryptFlags(flagNames, envNames config.Flags, defaults config.Defaults) []cli.Flag {
- return []cli.Flag{
- &cli.BoolFlag{
- Name: flagNames.LetsEncryptEnabled,
- Usage: "Enable letsencrypt TLS certs for this server. If set to true, then cert dir also needs to be set (or take the default).",
- Value: defaults.LetsEncryptEnabled,
- EnvVars: []string{envNames.LetsEncryptEnabled},
- },
- &cli.IntFlag{
- Name: flagNames.LetsEncryptPort,
- Usage: "Port to listen on for letsencrypt certificate challenges. Must not be the same as the GtS webserver/API port.",
- Value: defaults.LetsEncryptPort,
- EnvVars: []string{envNames.LetsEncryptPort},
- },
- &cli.StringFlag{
- Name: flagNames.LetsEncryptCertDir,
- Usage: "Directory to store acquired letsencrypt certificates.",
- Value: defaults.LetsEncryptCertDir,
- EnvVars: []string{envNames.LetsEncryptCertDir},
- },
- &cli.StringFlag{
- Name: flagNames.LetsEncryptEmailAddress,
- Usage: "Email address to use when requesting letsencrypt certs. Will receive updates on cert expiry etc.",
- Value: defaults.LetsEncryptEmailAddress,
- EnvVars: []string{envNames.LetsEncryptEmailAddress},
- },
- }
-}
diff --git a/cmd/gotosocial/main.go b/cmd/gotosocial/main.go
index 87c44487c..a0b8b1046 100644
--- a/cmd/gotosocial/main.go
+++ b/cmd/gotosocial/main.go
@@ -19,38 +19,59 @@
package main
import (
- "os"
-
"github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+ "github.com/superseriousbusiness/gotosocial/cmd/gotosocial/flag"
_ "github.com/superseriousbusiness/gotosocial/docs"
- "github.com/urfave/cli/v2"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
)
-// Version is the software version of GtS being used
+// Version is the software version of GtS being used.
var Version string
-// Commit is the git commit of GtS being used
+// Commit is the git commit of GtS being used.
var Commit string
//go:generate swagger generate spec
func main() {
var v string
- if Commit == "" {
+ if len(Commit) < 7 {
v = Version
} else {
v = Version + " " + Commit[:7]
}
- flagsSlice := getFlags()
- app := &cli.App{
- Version: v,
- Usage: "a fediverse social media server",
- Flags: flagsSlice,
- Commands: getCommands(flagsSlice),
+ // override software version in viper store
+ viper.Set(config.Keys.SoftwareVersion, v)
+
+ // instantiate the root command
+ rootCmd := &cobra.Command{
+ Use: "gotosocial",
+ Short: "GoToSocial - a fediverse social media server",
+ Long: "GoToSocial - a fediverse social media server\n\nFor help, see: https://docs.gotosocial.org.\n\nCode: https://github.com/superseriousbusiness/gotosocial",
+ Version: v,
+ SilenceErrors: true,
+ SilenceUsage: true,
}
- if err := app.Run(os.Args); err != nil {
- logrus.Fatal(err)
+ // attach global flags to the root command so that they can be accessed from any subcommand
+ flag.Global(rootCmd, config.Defaults)
+
+ // bind the config-path flag to viper early so that we can call it in the pre-run of following commands
+ if err := viper.BindPFlag(config.Keys.ConfigPath, rootCmd.PersistentFlags().Lookup(config.Keys.ConfigPath)); err != nil {
+ logrus.Fatalf("error attaching config flag: %s", err)
+ }
+
+ // add subcommands
+ rootCmd.AddCommand(serverCommands())
+ rootCmd.AddCommand(testrigCommands())
+ rootCmd.AddCommand(debugCommands())
+ rootCmd.AddCommand(adminCommands())
+
+ // run
+ if err := rootCmd.Execute(); err != nil {
+ logrus.Fatalf("error executing command: %s", err)
}
}
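
Note: `main()` above deliberately binds only the `--config-path` flag into viper before `Execute()`, so that each subcommand's pre-run can locate and read the config file before any other keys are resolved. The actual file-loading code lives elsewhere in the tree and is not part of this hunk; the following is a minimal, illustrative sketch of that pattern, assuming the flag was bound under the key `"config-path"`:

```go
// Illustrative sketch only -- not the project's actual config loader.
// Assumes the --config-path flag was bound into viper as "config-path".
package main

import (
	"fmt"

	"github.com/spf13/viper"
)

// loadConfigFile reads the file named by --config-path (if any) into viper,
// so that later viper.Get* calls fall back to file values when no flag or
// env var is set for a key.
func loadConfigFile() error {
	path := viper.GetString("config-path")
	if path == "" {
		return nil // no file given; flags, env vars, and defaults apply
	}

	viper.SetConfigFile(path)
	if err := viper.ReadInConfig(); err != nil {
		return fmt.Errorf("error reading config file %s: %w", path, err)
	}
	return nil
}

func main() {
	if err := loadConfigFile(); err != nil {
		panic(err)
	}
}
```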
diff --git a/cmd/gotosocial/mediaflags.go b/cmd/gotosocial/mediaflags.go
deleted file mode 100644
index abf009a89..000000000
--- a/cmd/gotosocial/mediaflags.go
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- GoToSocial
- Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see .
-*/
-
-package main
-
-import (
- "github.com/superseriousbusiness/gotosocial/internal/config"
- "github.com/urfave/cli/v2"
-)
-
-func mediaFlags(flagNames, envNames config.Flags, defaults config.Defaults) []cli.Flag {
- return []cli.Flag{
- &cli.IntFlag{
- Name: flagNames.MediaMaxImageSize,
- Usage: "Max size of accepted images in bytes",
- Value: defaults.MediaMaxImageSize,
- EnvVars: []string{envNames.MediaMaxImageSize},
- },
- &cli.IntFlag{
- Name: flagNames.MediaMaxVideoSize,
- Usage: "Max size of accepted videos in bytes",
- Value: defaults.MediaMaxVideoSize,
- EnvVars: []string{envNames.MediaMaxVideoSize},
- },
- &cli.IntFlag{
- Name: flagNames.MediaMinDescriptionChars,
- Usage: "Min required chars for an image description",
- Value: defaults.MediaMinDescriptionChars,
- EnvVars: []string{envNames.MediaMinDescriptionChars},
- },
- &cli.IntFlag{
- Name: flagNames.MediaMaxDescriptionChars,
- Usage: "Max permitted chars for an image description",
- Value: defaults.MediaMaxDescriptionChars,
- EnvVars: []string{envNames.MediaMaxDescriptionChars},
- },
- }
-}
diff --git a/cmd/gotosocial/oidcflags.go b/cmd/gotosocial/oidcflags.go
deleted file mode 100644
index 93b86b166..000000000
--- a/cmd/gotosocial/oidcflags.go
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- GoToSocial
- Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see .
-*/
-
-package main
-
-import (
- "github.com/superseriousbusiness/gotosocial/internal/config"
- "github.com/urfave/cli/v2"
-)
-
-func oidcFlags(flagNames, envNames config.Flags, defaults config.Defaults) []cli.Flag {
- return []cli.Flag{
- &cli.BoolFlag{
- Name: flagNames.OIDCEnabled,
- Usage: "Enabled OIDC authorization for this instance. If set to true, then the other OIDC flags must also be set.",
- Value: defaults.OIDCEnabled,
- EnvVars: []string{envNames.OIDCEnabled},
- },
- &cli.StringFlag{
- Name: flagNames.OIDCIdpName,
- Usage: "Name of the OIDC identity provider. Will be shown to the user when logging in.",
- Value: defaults.OIDCIdpName,
- EnvVars: []string{envNames.OIDCIdpName},
- },
- &cli.BoolFlag{
- Name: flagNames.OIDCSkipVerification,
- Usage: "Skip verification of tokens returned by the OIDC provider. Should only be set to 'true' for testing purposes, never in a production environment!",
- Value: defaults.OIDCSkipVerification,
- EnvVars: []string{envNames.OIDCSkipVerification},
- },
- &cli.StringFlag{
- Name: flagNames.OIDCIssuer,
- Usage: "Address of the OIDC issuer. Should be the web address, including protocol, at which the issuer can be reached. Eg., 'https://example.org/auth'",
- Value: defaults.OIDCIssuer,
- EnvVars: []string{envNames.OIDCIssuer},
- },
- &cli.StringFlag{
- Name: flagNames.OIDCClientID,
- Usage: "ClientID of GoToSocial, as registered with the OIDC provider.",
- Value: defaults.OIDCClientID,
- EnvVars: []string{envNames.OIDCClientID},
- },
- &cli.StringFlag{
- Name: flagNames.OIDCClientSecret,
- Usage: "ClientSecret of GoToSocial, as registered with the OIDC provider.",
- Value: defaults.OIDCClientSecret,
- EnvVars: []string{envNames.OIDCClientSecret},
- },
- &cli.StringSliceFlag{
- Name: flagNames.OIDCScopes,
- Usage: "ClientSecret of GoToSocial, as registered with the OIDC provider.",
- Value: cli.NewStringSlice(defaults.OIDCScopes...),
- EnvVars: []string{envNames.OIDCScopes},
- },
- }
-}
diff --git a/cmd/gotosocial/runaction.go b/cmd/gotosocial/runaction.go
deleted file mode 100644
index 96c4edaf6..000000000
--- a/cmd/gotosocial/runaction.go
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- GoToSocial
- Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see .
-*/
-
-package main
-
-import (
- "fmt"
-
- "github.com/superseriousbusiness/gotosocial/internal/cliactions"
- "github.com/superseriousbusiness/gotosocial/internal/config"
- "github.com/superseriousbusiness/gotosocial/internal/log"
- "github.com/urfave/cli/v2"
-)
-
-type MonkeyPatchedCLIContext struct {
- CLIContext *cli.Context
- AllFlags []cli.Flag
-}
-
-func (f MonkeyPatchedCLIContext) Bool(k string) bool { return f.CLIContext.Bool(k) }
-func (f MonkeyPatchedCLIContext) String(k string) string { return f.CLIContext.String(k) }
-func (f MonkeyPatchedCLIContext) StringSlice(k string) []string { return f.CLIContext.StringSlice(k) }
-func (f MonkeyPatchedCLIContext) Int(k string) int { return f.CLIContext.Int(k) }
-func (f MonkeyPatchedCLIContext) IsSet(k string) bool {
- for _, flag := range f.AllFlags {
- flagNames := flag.Names()
- for _, name := range flagNames {
- if name == k {
- return flag.IsSet()
- }
- }
-
- }
- return false
-}
-
-// runAction builds up the config and logger necessary for any
-// gotosocial action, and then executes the action.
-func runAction(c *cli.Context, allFlags []cli.Flag, a cliactions.GTSAction) error {
-
- // create a new *config.Config based on the config path provided...
- conf, err := config.FromFile(c.String(config.GetFlagNames().ConfigPath))
- if err != nil {
- return fmt.Errorf("error creating config: %s", err)
- }
-
- // ... and the flags set on the *cli.Context by urfave
- //
- // The IsSet function on the cli.Context object `c` here appears to have some issues right now, it always returns false in my tests.
- // However we can re-create the behaviour we want by simply referencing the flag objects we created previously
- // https://picopublish.sequentialread.com/files/chatlog_2021_11_18.txt
- monkeyPatchedCLIContext := MonkeyPatchedCLIContext{
- CLIContext: c,
- AllFlags: allFlags,
- }
- if err := conf.ParseCLIFlags(monkeyPatchedCLIContext, c.App.Version); err != nil {
- return fmt.Errorf("error parsing config: %s", err)
- }
-
- // initialize the global logger to the log level, with formatting and output splitter already set
- err = log.Initialize(conf.LogLevel)
- if err != nil {
- return fmt.Errorf("error creating logger: %s", err)
- }
-
- return a(c.Context, conf)
-}
diff --git a/cmd/gotosocial/templateflags.go b/cmd/gotosocial/server.go
similarity index 53%
rename from cmd/gotosocial/templateflags.go
rename to cmd/gotosocial/server.go
index c7bf93bc0..742a4f505 100644
--- a/cmd/gotosocial/templateflags.go
+++ b/cmd/gotosocial/server.go
@@ -19,23 +19,31 @@
package main
import (
+ "github.com/spf13/cobra"
+ "github.com/superseriousbusiness/gotosocial/cmd/gotosocial/action/server"
+ "github.com/superseriousbusiness/gotosocial/cmd/gotosocial/flag"
"github.com/superseriousbusiness/gotosocial/internal/config"
- "github.com/urfave/cli/v2"
)
-func templateFlags(flagNames, envNames config.Flags, defaults config.Defaults) []cli.Flag {
- return []cli.Flag{
- &cli.StringFlag{
- Name: flagNames.TemplateBaseDir,
- Usage: "Basedir for html templating files for rendering pages and composing emails.",
- Value: defaults.TemplateBaseDir,
- EnvVars: []string{envNames.TemplateBaseDir},
+// serverCommands returns the 'server' subcommand
+func serverCommands() *cobra.Command {
+ serverCmd := &cobra.Command{
+ Use: "server",
+ Short: "gotosocial server-related tasks",
+ }
+
+ serverStartCmd := &cobra.Command{
+ Use: "start",
+ Short: "start the gotosocial server",
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ return preRun(cmd)
},
- &cli.StringFlag{
- Name: flagNames.AssetBaseDir,
- Usage: "Directory to serve static assets from, accessible at example.com/assets/",
- Value: defaults.AssetBaseDir,
- EnvVars: []string{envNames.AssetBaseDir},
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return run(cmd.Context(), server.Start)
},
}
+ flag.Server(serverStartCmd, config.Defaults)
+
+ serverCmd.AddCommand(serverStartCmd)
+ return serverCmd
}
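
The `preRun` and `run` helpers referenced by `serverCommands()` above are defined in `cmd/gotosocial/common.go`, which is outside this hunk. As a rough, hedged sketch of the pattern (not the project's actual implementation): `preRun` binds the command's pflags into viper so that flag, env var, and config-file values share one keyspace, and `run` hands the command context to the chosen action. The `gtsAction` signature below is an assumption standing in for the real `action.GTSAction` type.

```go
// Minimal sketch of the common helpers, assuming viper-backed config.
// Fragment of package main; the real versions live in cmd/gotosocial/common.go
// and may differ.
package main

import (
	"context"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

// gtsAction stands in for the action.GTSAction type used by the real commands;
// its exact signature here is an assumption.
type gtsAction func(ctx context.Context) error

// preRun binds the command's flag set into viper, so that any flag the user
// passed overrides env vars and config-file values for the same key.
func preRun(cmd *cobra.Command) error {
	return viper.BindPFlags(cmd.Flags())
}

// run executes the selected action with the command's context.
func run(ctx context.Context, act gtsAction) error {
	return act(ctx)
}
```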
diff --git a/cmd/gotosocial/servercommands.go b/cmd/gotosocial/servercommands.go
deleted file mode 100644
index fb6574216..000000000
--- a/cmd/gotosocial/servercommands.go
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- GoToSocial
- Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see .
-*/
-
-package main
-
-import (
- "github.com/superseriousbusiness/gotosocial/internal/cliactions/server"
- "github.com/urfave/cli/v2"
-)
-
-func serverCommands(allFlags []cli.Flag) []*cli.Command {
- return []*cli.Command{
- {
- Name: "server",
- Usage: "gotosocial server-related tasks",
- Subcommands: []*cli.Command{
- {
- Name: "start",
- Usage: "start the gotosocial server",
- Action: func(c *cli.Context) error {
- return runAction(c, allFlags, server.Start)
- },
- },
- },
- },
- }
-}
diff --git a/cmd/gotosocial/smtpflags.go b/cmd/gotosocial/smtpflags.go
deleted file mode 100644
index 5c790ef7e..000000000
--- a/cmd/gotosocial/smtpflags.go
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- GoToSocial
- Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see .
-*/
-
-package main
-
-import (
- "github.com/superseriousbusiness/gotosocial/internal/config"
- "github.com/urfave/cli/v2"
-)
-
-func smtpFlags(flagNames, envNames config.Flags, defaults config.Defaults) []cli.Flag {
- return []cli.Flag{
- &cli.StringFlag{
- Name: flagNames.SMTPHost,
- Usage: "Host of the smtp server. Eg., 'smtp.eu.mailgun.org'",
- Value: defaults.SMTPHost,
- EnvVars: []string{envNames.SMTPHost},
- },
- &cli.IntFlag{
- Name: flagNames.SMTPPort,
- Usage: "Port of the smtp server. Eg., 587",
- Value: defaults.SMTPPort,
- EnvVars: []string{envNames.SMTPPort},
- },
- &cli.StringFlag{
- Name: flagNames.SMTPUsername,
- Usage: "Username to authenticate with the smtp server as. Eg., 'postmaster@mail.example.org'",
- Value: defaults.SMTPUsername,
- EnvVars: []string{envNames.SMTPUsername},
- },
- &cli.StringFlag{
- Name: flagNames.SMTPPassword,
- Usage: "Password to pass to the smtp server.",
- Value: defaults.SMTPPassword,
- EnvVars: []string{envNames.SMTPPassword},
- },
- &cli.StringFlag{
- Name: flagNames.SMTPFrom,
- Usage: "Address to use as the 'from' field of the email. Eg., 'gotosocial@example.org'",
- Value: defaults.SMTPFrom,
- EnvVars: []string{envNames.SMTPFrom},
- },
- }
-}
diff --git a/cmd/gotosocial/statusesflags.go b/cmd/gotosocial/statusesflags.go
deleted file mode 100644
index af52dc0ae..000000000
--- a/cmd/gotosocial/statusesflags.go
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- GoToSocial
- Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see .
-*/
-
-package main
-
-import (
- "github.com/superseriousbusiness/gotosocial/internal/config"
- "github.com/urfave/cli/v2"
-)
-
-func statusesFlags(flagNames, envNames config.Flags, defaults config.Defaults) []cli.Flag {
- return []cli.Flag{
- &cli.IntFlag{
- Name: flagNames.StatusesMaxChars,
- Usage: "Max permitted characters for posted statuses",
- Value: defaults.StatusesMaxChars,
- EnvVars: []string{envNames.StatusesMaxChars},
- },
- &cli.IntFlag{
- Name: flagNames.StatusesCWMaxChars,
- Usage: "Max permitted characters for content/spoiler warnings on statuses",
- Value: defaults.StatusesCWMaxChars,
- EnvVars: []string{envNames.StatusesCWMaxChars},
- },
- &cli.IntFlag{
- Name: flagNames.StatusesPollMaxOptions,
- Usage: "Max amount of options permitted on a poll",
- Value: defaults.StatusesPollMaxOptions,
- EnvVars: []string{envNames.StatusesPollMaxOptions},
- },
- &cli.IntFlag{
- Name: flagNames.StatusesPollOptionMaxChars,
- Usage: "Max amount of characters for a poll option",
- Value: defaults.StatusesPollOptionMaxChars,
- EnvVars: []string{envNames.StatusesPollOptionMaxChars},
- },
- &cli.IntFlag{
- Name: flagNames.StatusesMaxMediaFiles,
- Usage: "Maximum number of media files/attachments per status",
- Value: defaults.StatusesMaxMediaFiles,
- EnvVars: []string{envNames.StatusesMaxMediaFiles},
- },
- }
-}
diff --git a/cmd/gotosocial/storageflags.go b/cmd/gotosocial/storageflags.go
deleted file mode 100644
index 326e1fae5..000000000
--- a/cmd/gotosocial/storageflags.go
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- GoToSocial
- Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see .
-*/
-
-package main
-
-import (
- "github.com/superseriousbusiness/gotosocial/internal/config"
- "github.com/urfave/cli/v2"
-)
-
-func storageFlags(flagNames, envNames config.Flags, defaults config.Defaults) []cli.Flag {
- return []cli.Flag{
- &cli.StringFlag{
- Name: flagNames.StorageBackend,
- Usage: "Storage backend to use for media attachments",
- Value: defaults.StorageBackend,
- EnvVars: []string{envNames.StorageBackend},
- },
- &cli.StringFlag{
- Name: flagNames.StorageBasePath,
- Usage: "Full path to an already-created directory where gts should store/retrieve media files. Subfolders will be created within this dir.",
- Value: defaults.StorageBasePath,
- EnvVars: []string{envNames.StorageBasePath},
- },
- &cli.StringFlag{
- Name: flagNames.StorageServeProtocol,
- Usage: "Protocol to use for serving media attachments (use https if storage is local)",
- Value: defaults.StorageServeProtocol,
- EnvVars: []string{envNames.StorageServeProtocol},
- },
- &cli.StringFlag{
- Name: flagNames.StorageServeHost,
- Usage: "Hostname to serve media attachments from (use the same value as host if storage is local)",
- Value: defaults.StorageServeHost,
- EnvVars: []string{envNames.StorageServeHost},
- },
- &cli.StringFlag{
- Name: flagNames.StorageServeBasePath,
- Usage: "Path to append to protocol and hostname to create the base path from which media files will be served (default will mostly be fine)",
- Value: defaults.StorageServeBasePath,
- EnvVars: []string{envNames.StorageServeBasePath},
- },
- }
-}
diff --git a/cmd/gotosocial/testrigcommands.go b/cmd/gotosocial/testrig.go
similarity index 60%
rename from cmd/gotosocial/testrigcommands.go
rename to cmd/gotosocial/testrig.go
index aabe04267..b78996475 100644
--- a/cmd/gotosocial/testrigcommands.go
+++ b/cmd/gotosocial/testrig.go
@@ -19,24 +19,24 @@
package main
import (
- "github.com/superseriousbusiness/gotosocial/internal/cliactions/testrig"
- "github.com/urfave/cli/v2"
+ "github.com/spf13/cobra"
+ "github.com/superseriousbusiness/gotosocial/cmd/gotosocial/action/testrig"
)
-func testrigCommands(allFlags []cli.Flag) []*cli.Command {
- return []*cli.Command{
- {
- Name: "testrig",
- Usage: "gotosocial testrig tasks",
- Subcommands: []*cli.Command{
- {
- Name: "start",
- Usage: "start the gotosocial testrig",
- Action: func(c *cli.Context) error {
- return runAction(c, allFlags, testrig.Start)
- },
- },
- },
+func testrigCommands() *cobra.Command {
+ testrigCmd := &cobra.Command{
+ Use: "testrig",
+ Short: "gotosocial testrig-related tasks",
+ }
+
+ testrigStartCmd := &cobra.Command{
+ Use: "start",
+ Short: "start the gotosocial testrig server",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return run(cmd.Context(), testrig.Start)
},
}
+
+ testrigCmd.AddCommand(testrigStartCmd)
+ return testrigCmd
}
diff --git a/docs/admin/cli.md b/docs/admin/cli.md
index e15daf084..a65282ceb 100644
--- a/docs/admin/cli.md
+++ b/docs/admin/cli.md
@@ -4,35 +4,34 @@ GoToSocial compiles to an executable binary.
The standard way of using this binary is to run a server with the `gotosocial server start` command.
-However, this binary can also be used as an admin tool.
+However, this binary can also be used as an admin tool, and for debugging.
Here's the full output of `gotosocial --help`, without the big list of global config options.
```text
-NAME:
- gotosocial - a fediverse social media server
+GoToSocial - a fediverse social media server
-USAGE:
- gotosocial [global options] command [command options] [arguments...]
+For help, see: https://docs.gotosocial.org.
-VERSION:
- 0.1.0-SNAPSHOT a940a52
+Code: https://github.com/superseriousbusiness/gotosocial
-COMMANDS:
- server gotosocial server-related tasks
- admin gotosocial admin-related tasks
- testrig gotosocial testrig tasks
- help, h Shows a list of commands or help for one command
+Usage:
+ gotosocial [command]
-GLOBAL OPTIONS:
- [a huge list of global options -- too much to show here]
+Available Commands:
+ admin gotosocial admin-related tasks
+ completion generate the autocompletion script for the specified shell
+ debug gotosocial debug-related tasks
+ help Help about any command
+ server gotosocial server-related tasks
+ testrig gotosocial testrig-related tasks
```
-Under `COMMANDS`, you can see the standard `server` command. But there are also commands doing admin and testing etc, which will be explained in this document.
+Under `Available Commands`, you can see the standard `server` command. But there are also commands for admin, debugging, and testing tasks, which are explained in this document.
**Please note -- for all of these commands, you will still need to set the global options correctly so that the CLI tool knows, eg., how to connect to your database, which database to use, which host and account domain to use, etc.**
-You can set these global options using environment variables, passing them as CLI variables after the `gotosocial` part of the command (eg., `gotosocial --host example.org [commands]`), or by just pointing the CLI tool towards your config file (eg., `gotosocial --config-path ./config.yaml [commands]`).
+You can set these options using environment variables, passing them as CLI flags (eg., `gotosocial [commands] --host example.org`), or by just pointing the CLI tool towards your config file (eg., `gotosocial [commands] --config-path ./config.yaml`).
## gotosocial admin
@@ -45,17 +44,16 @@ This command can be used to create a new account on your instance.
`gotosocial admin account create --help`:
```text
-NAME:
- gotosocial admin account create - create a new account
+create a new account
-USAGE:
- gotosocial admin account create [command options] [arguments...]
+Usage:
+ gotosocial admin account create [flags]
-OPTIONS:
- --username value the username to create/delete/etc
- --email value the email address of this account
- --password value the password to set for this account
- --help, -h show help (default: false)
+Flags:
+ --email string the email address of this account
+ -h, --help help for create
+ --password string the password to set for this account
+ --username string the username to create/delete/etc
```
Example:
@@ -74,15 +72,14 @@ This command can be used to confirm a user+account on your instance, allowing th
`gotosocial admin account confirm --help`:
```text
-NAME:
- gotosocial admin account confirm - confirm an existing account manually, thereby skipping email confirmation
+confirm an existing account manually, thereby skipping email confirmation
-USAGE:
- gotosocial admin account confirm [command options] [arguments...]
+Usage:
+ gotosocial admin account confirm [flags]
-OPTIONS:
- --username value the username to create/delete/etc
- --help, -h show help (default: false)
+Flags:
+ -h, --help help for confirm
+ --username string the username to create/delete/etc
```
Example:
@@ -98,15 +95,14 @@ This command can be used to promote a user to admin.
`gotosocial admin account promote --help`:
```text
-NAME:
- gotosocial admin account promote - promote an account to admin
+promote an account to admin
-USAGE:
- gotosocial admin account promote [command options] [arguments...]
+Usage:
+ gotosocial admin account promote [flags]
-OPTIONS:
- --username value the username to create/delete/etc
- --help, -h show help (default: false)
+Flags:
+ -h, --help help for promote
+ --username string the username to create/delete/etc
```
Example:
@@ -122,15 +118,14 @@ This command can be used to demote a user from admin to normal user.
`gotosocial admin account demote --help`:
```text
-NAME:
- gotosocial admin account demote - demote an account from admin to normal user
+demote an account from admin to normal user
-USAGE:
- gotosocial admin account demote [command options] [arguments...]
+Usage:
+ gotosocial admin account demote [flags]
-OPTIONS:
- --username value the username to create/delete/etc
- --help, -h show help (default: false)
+Flags:
+ -h, --help help for demote
+ --username string the username to create/delete/etc
```
Example:
@@ -146,15 +141,14 @@ This command can be used to disable an account: prevent it from signing in or do
`gotosocial admin account disable --help`:
```text
-NAME:
- gotosocial admin account disable - prevent an account from signing in or posting etc, but don't delete anything
+prevent an account from signing in or posting etc, but don't delete anything
-USAGE:
- gotosocial admin account disable [command options] [arguments...]
+Usage:
+ gotosocial admin account disable [flags]
-OPTIONS:
- --username value the username to create/delete/etc
- --help, -h show help (default: false)
+Flags:
+ -h, --help help for disable
+ --username string the username to create/delete/etc
```
Example:
@@ -172,15 +166,14 @@ In other words, this 'deletes' the account (without actually removing the accoun
`gotosocial admin account suspend --help`:
```text
-NAME:
- gotosocial admin account suspend - completely remove an account and all of its posts, media, etc
+completely remove an account and all of its posts, media, etc
-USAGE:
- gotosocial admin account suspend [command options] [arguments...]
+Usage:
+ gotosocial admin account suspend [flags]
-OPTIONS:
- --username value the username to create/delete/etc
- --help, -h show help (default: false)
+Flags:
+ -h, --help help for suspend
+ --username string the username to create/delete/etc
```
Example:
@@ -196,16 +189,15 @@ This command can be used to set a new password on the given account.
`gotosocial admin account password --help`:
```text
-NAME:
- gotosocial admin account password - set a new password for the given account
+set a new password for the given account
-USAGE:
- gotosocial admin account password [command options] [arguments...]
+Usage:
+ gotosocial admin account password [flags]
-OPTIONS:
- --username value the username to create/delete/etc
- --password value the password to set for this account
- --help, -h show help (default: false)
+Flags:
+ -h, --help help for password
+ --password string the password to set for this account
+ --username string the username to create/delete/etc
```
Example:
@@ -223,21 +215,20 @@ The file format will be a series of newline-separated JSON objects.
`gotosocial admin export --help`:
```text
-NAME:
- gotosocial admin export - export data from the database to file at the given path
+export data from the database to file at the given path
-USAGE:
- gotosocial admin export [command options] [arguments...]
+Usage:
+ gotosocial admin export [flags]
-OPTIONS:
- --path value the path of the file to import from/export to
- --help, -h show help (default: false)
+Flags:
+ -h, --help help for export
+ --path string the path of the file to import from/export to
```
Example:
```bash
-gotosocial admin export --path ./example.json
+gotosocial admin export --config-path ./config.yaml --path ./example.json
```
`example.json`:
@@ -272,19 +263,18 @@ The file format should be a series of newline-separated JSON objects (see above)
`gotosocial admin import --help`:
```text
-NAME:
- gotosocial admin import - import data from a file into the database
+import data from a file into the database
-USAGE:
- gotosocial admin import [command options] [arguments...]
+Usage:
+ gotosocial admin import [flags]
-OPTIONS:
- --path value the path of the file to import from/export to
- --help, -h show help (default: false)
+Flags:
+ -h, --help help for import
+ --path string the path of the file to import from/export to
```
Example:
```bash
-gotosocial admin import --path ./example.json
+gotosocial admin import --config-path ./config.yaml --path ./example.json
```
diff --git a/docs/configuration/accounts.md b/docs/configuration/accounts.md
index e77bc92fc..edb647a1b 100644
--- a/docs/configuration/accounts.md
+++ b/docs/configuration/accounts.md
@@ -8,20 +8,19 @@
###########################
# Config pertaining to creation and maintenance of accounts on the server, as well as defaults for new accounts.
-accounts:
- # Bool. Do we want people to be able to just submit sign up requests, or do we want invite only?
- # Options: [true, false]
- # Default: true
- openRegistration: true
+# Bool. Do we want people to be able to just submit sign up requests, or do we want invite only?
+# Options: [true, false]
+# Default: true
+accounts-registration-open: true
- # Bool. Do sign up requests require approval from an admin/moderator before an account can sign in/use the server?
- # Options: [true, false]
- # Default: true
- requireApproval: true
+# Bool. Do sign up requests require approval from an admin/moderator before an account can sign in/use the server?
+# Options: [true, false]
+# Default: true
+accounts-approval-required: true
- # Bool. Are sign up requests required to submit a reason for the request (eg., an explanation of why they want to join the instance)?
- # Options: [true, false]
- # Default: true
- reasonRequired: true
+# Bool. Are sign up requests required to submit a reason for the request (eg., an explanation of why they want to join the instance)?
+# Options: [true, false]
+# Default: true
+accounts-reason-required: true
```
diff --git a/docs/configuration/database.md b/docs/configuration/database.md
index 0c6cf692e..6acc2e9ec 100644
--- a/docs/configuration/database.md
+++ b/docs/configuration/database.md
@@ -8,7 +8,7 @@ By default, GoToSocial will use Postgres, but this is easy to change.
SQLite, as the name implies, is the lightest database type that GoToSocial can use. It stores entries in a simple file format, usually in the same directory as the GoToSocial binary itself. SQLite is great for small instances and lower-powered machines like Raspberry Pi, where a dedicated database would be overkill.
-To configure GoToSocial to use SQLite, change `db.type` to `sqlite`. The `address` setting will then be a filename instead of an address, so you might want to change it to `sqlite.db` or something similar.
+To configure GoToSocial to use SQLite, change `db-type` to `sqlite`. The `db-address` setting will then be a filename instead of an address, so you might want to change it to `sqlite.db` or something similar.
Note that the `:memory:` setting will use an *in-memory database* which will be wiped when your GoToSocial instance stops running. This is for testing only and is absolutely not suitable for running a proper instance, so *don't do this*.
@@ -18,7 +18,7 @@ Postgres is a heavier database format, which is useful for larger instances wher
GoToSocial supports connecting to Postgres using SSL/TLS. If you're running Postgres on a different machine from GoToSocial, and connecting to it via an IP address or hostname (as opposed to just running on localhost), then SSL/TLS is **CRUCIAL** to avoid leaking data all over the place!
-When you're using Postgres, GoToSocial expects whatever you've set for `db.user` to already be created in the database, and to have ownership of whatever you've set for `db.database`.
+When you're using Postgres, GoToSocial expects whatever you've set for `db-user` to already be created in the database, and to have ownership of whatever you've set for `db-database`.
For example, if you set:
@@ -48,51 +48,50 @@ grant all privileges on database gotosocial to gotosocial;
############################
# Config pertaining to the Gotosocial database connection
-db:
- # String. Database type.
- # Options: ["postgres","sqlite"]
- # Default: "postgres"
- type: "postgres"
+# String. Database type.
+# Options: ["postgres","sqlite"]
+# Default: "postgres"
+db-type: "postgres"
- # String. Database address or parameters.
- # Examples: ["localhost","my.db.host","127.0.0.1","192.111.39.110",":memory:"]
- # Default: "localhost"
- address: "127.0.0.1"
+# String. Database address or parameters.
+# Examples: ["localhost","my.db.host","127.0.0.1","192.111.39.110",":memory:"]
+# Default: "localhost"
+db-address: "127.0.0.1"
- # Int. Port for database connection.
- # Examples: [5432, 1234, 6969]
- # Default: 5432
- port: 5432
+# Int. Port for database connection.
+# Examples: [5432, 1234, 6969]
+# Default: 5432
+db-port: 5432
- # String. Username for the database connection.
- # Examples: ["mydbuser","postgres","gotosocial"]
- # Default: "postgres"
- user: "postgres"
+# String. Username for the database connection.
+# Examples: ["mydbuser","postgres","gotosocial"]
+# Default: "postgres"
+db-user: "postgres"
- # REQUIRED
- # String. Password to use for the database connection
- # Examples: ["password123","verysafepassword","postgres"]
- # Default: "postgres"
- password: "postgres"
+# REQUIRED
+# String. Password to use for the database connection
+# Examples: ["password123","verysafepassword","postgres"]
+# Default: "postgres"
+db-password: "postgres"
- # String. Name of the database to use within the provided database type.
- # Examples: ["mydb","postgres","gotosocial"]
- # Default: "postgres"
- database: "postgres"
+# String. Name of the database to use within the provided database type.
+# Examples: ["mydb","postgres","gotosocial"]
+# Default: "postgres"
+db-database: "postgres"
- # String. Disable, enable, or require SSL/TLS connection to the database.
- # If "disable" then no TLS connection will be attempted.
- # If "enable" then TLS will be tried, but the database certificate won't be checked (for self-signed certs).
- # If "require" then TLS will be required to make a connection, and a valid certificate must be presented.
- # Options: ["disable", "enable", "require"]
- # Default: "disable"
- tlsMode: "disable"
+# String. Disable, enable, or require SSL/TLS connection to the database.
+# If "disable" then no TLS connection will be attempted.
+# If "enable" then TLS will be tried, but the database certificate won't be checked (for self-signed certs).
+# If "require" then TLS will be required to make a connection, and a valid certificate must be presented.
+# Options: ["disable", "enable", "require"]
+# Default: "disable"
+db-tls-mode: "disable"
- # String. Path to a CA certificate on the host machine for db certificate validation.
- # If this is left empty, just the host certificates will be used.
- # If filled in, the certificate will be loaded and added to host certificates.
- # Examples: ["/path/to/some/cert.crt"]
- # Default: ""
- tlsCACert: ""
+# String. Path to a CA certificate on the host machine for db certificate validation.
+# If this is left empty, just the host certificates will be used.
+# If filled in, the certificate will be loaded and added to host certificates.
+# Examples: ["/path/to/some/cert.crt"]
+# Default: ""
+db-tls-ca-cert: ""
```
diff --git a/docs/configuration/general.md b/docs/configuration/general.md
index be27095d1..5e23d4699 100644
--- a/docs/configuration/general.md
+++ b/docs/configuration/general.md
@@ -14,12 +14,12 @@ The only things you *really* need to set here are `host`, which should be the ho
# String. Log level to use throughout the application. Must be lower-case.
# Options: ["trace","debug","info","warn","error","fatal"]
# Default: "info"
-logLevel: "info"
+log-level: "info"
# String. Application name to use internally.
# Examples: ["My Application","gotosocial"]
# Default: "gotosocial"
-applicationName: "gotosocial"
+application-name: "gotosocial"
# String. Hostname that this server will be reachable at. Defaults to localhost for local testing,
# but you should *definitely* change this when running for real, or your server won't work at all.
@@ -38,7 +38,7 @@ host: "localhost"
# DO NOT change this after your server has already run once, or you will break things!
# Examples: ["example.org","server.com"]
# Default: ""
-accountDomain: ""
+account-domain: ""
# String. Protocol to use for the server. Only change to http for local testing!
# This should be the protocol part of the URI that your server is actually reachable on. So even if you're
@@ -55,7 +55,7 @@ protocol: "https"
# you have specific networking requirements.
# Examples: ["0.0.0.0", "172.128.0.16", "localhost", "[::]", "[2001:db8::fed1]"]
# Default: "0.0.0.0"
-bindAddress: "0.0.0.0"
+bind-address: "0.0.0.0"
# Int. Listen port for the GoToSocial webserver + API. If you're running behind a reverse proxy and/or in a docker,
# container, just set this to whatever you like (or leave the default), and make sure it's forwarded properly.
@@ -71,6 +71,6 @@ port: 8080
# or the gateway of the docker network, and/or the address of the reverse proxy (if it's not running on the host network).
# Example: ["127.0.0.1/32", "172.20.0.1"]
# Default: ["127.0.0.1/32"] (localhost)
-trustedProxies:
+trusted-proxies:
- "127.0.0.1/32"
```
diff --git a/docs/configuration/index.md b/docs/configuration/index.md
index ef27a0725..ed36505ec 100644
--- a/docs/configuration/index.md
+++ b/docs/configuration/index.md
@@ -4,8 +4,6 @@ GoToSocial aims to be as configurable as possible, to fit lots of different use
We try to provide sensible defaults wherever possible, but you can't run a GoToSocial instance without managing *some* configuration.
-In this section, we describe the different methods available for configuring GoToSocial,
-
## Configuration Methods
There are three different methods for configuring a GoToSocial instance, which can be combined depending on your setup.
@@ -15,41 +13,39 @@ There are three different methods for configuring a GoToSocial instance, which c
The easiest way to configure GoToSocial is to pass a configuration file to the `gotosocial server start` command, for example:
```bash
-gotosocial --config-path ./config.yaml server start
+gotosocial server start --config-path ./config.yaml
```
-The command expects a file in [YAML](https://en.wikipedia.org/wiki/YAML) format.
+The command expects a file in [YAML](https://en.wikipedia.org/wiki/YAML) or [JSON](https://en.wikipedia.org/wiki/JSON) format.
An example configuration file, with an explanation of each of the config fields, with default and example values, can be found [here](https://github.com/superseriousbusiness/gotosocial/blob/main/example/config.yaml).
-This example file is included with release downloads, so you can just copy it and edit it to your needs without having to worry too much about what the hell YAML is.
+This example file is included with release downloads, so you can just copy it and edit it to your needs without having to worry too much about what the hell YAML or JSON is.
### Environment Variables
-You can also configure GoToSocial by setting [environment variables](https://en.wikipedia.org/wiki/Environment_variable). These environment variables generally follow the format:
+You can also configure GoToSocial by setting [environment variables](https://en.wikipedia.org/wiki/Environment_variable). These environment variables follow the format:
+
+1. Prepend `GTS_` to the config flag name.
+2. Uppercase everything.
+3. Replace dashes (`-`) with underscores (`_`).
+
+So for example, instead of setting `media-image-max-size` to `2097152` in your config.yaml, you could set the environment variable:
```text
-GTS_[CONFIG_SECTION]_[FIELD_NAME]
+GTS_MEDIA_IMAGE_MAX_SIZE=2097152
```
-So for example, instead of setting `media.maxImageSize` to `2097152` in your config.yaml, you could set the environment variable:
-
-```text
-GTS_MEDIA_MAX_IMAGE_SIZE=2097152
-```
-
-If you're in doubt about any of the names of these environment variables, just check `gotosocial --help`. They're all listed there.
+If you're in doubt about any of the names of these environment variables, just check the `--help` for the subcommand you're using.
### Command Line Flags
-Finally, you can set configuration values using command-line flags, which you pass directly when you're running a `gotosocial` command. For example, instead of setting `media.maxImageSize` in your config.yaml, or with an environment variable, you can pass the value directly through the command line:
+Finally, you can set configuration values using command-line flags, which you pass directly when you're running a `gotosocial` command. For example, instead of setting `media-image-max-size` in your config.yaml, or with an environment variable, you can pass the value directly through the command line:
```bash
-gotosocial --media-max-image-size 2097152 server start
+gotosocial server start --media-image-max-size 2097152
```
-Note the weird order of the above command; `gotosocial` first, then all global variables (like the ones in the config.yaml), then the command you want to execute.
-
If you're in doubt about which flags are available, check `gotosocial --help`.
## Priority
@@ -60,9 +56,9 @@ The above configuration methods override each other in the order in which they w
command line flags > environment variables > config file
```
-That is, if you set `media.maxImageSize` to `2097152` in your config file, but then *ALSO* set the environment variable `GTS_MEDIA_MAX_IMAGE_SIZE=9999999`, then the final value will be `9999999`, because environment variables have a *higher priority* than values set in config.yaml.
+That is, if you set `media-image-max-size` to `2097152` in your config file, but then *ALSO* set the environment variable `GTS_MEDIA_IMAGE_MAX_SIZE=9999999`, then the final value will be `9999999`, because environment variables have a *higher priority* than values set in config.yaml.
-Command line flags have the highest priority, so if you set `--media-max-image-size 13121312`, then the final value will be `13121312` regardless of what you've set elsewhere.
+Command line flags have the highest priority, so if you set `--media-image-max-size 13121312`, then the final value will be `13121312` regardless of what you've set elsewhere.
This means in cases where you want to just try changing one thing, but don't want to edit your config file, you can temporarily use an environment variable or a command line flag to set that one thing.
diff --git a/docs/configuration/letsencrypt.md b/docs/configuration/letsencrypt.md
index 05953cbb7..4e3a6d59b 100644
--- a/docs/configuration/letsencrypt.md
+++ b/docs/configuration/letsencrypt.md
@@ -8,36 +8,35 @@
##############################
# Config pertaining to the automatic acquisition and use of LetsEncrypt HTTPS certificates.
-letsEncrypt:
- # Bool. Whether or not letsencrypt should be enabled for the server.
- # If false, the rest of the settings here will be ignored.
- # You should only change this if you want to serve GoToSocial behind a reverse proxy
- # like Traefik, HAProxy, or Nginx.
- # Options: [true, false]
- # Default: true
- enabled: true
+# Bool. Whether or not letsencrypt should be enabled for the server.
+# If false, the rest of the settings here will be ignored.
+# You should only change this if you want to serve GoToSocial behind a reverse proxy
+# like Traefik, HAProxy, or Nginx.
+# Options: [true, false]
+# Default: true
+letsencrypt-enabled: true
- # Int. Port to listen for letsencrypt certificate challenges on.
- # If letsencrypt is enabled, this port must be reachable or you won't be able to obtain certs.
- # If letsencrypt is disabled, this port will not be used.
- # This *must not* be the same as the webserver/API port specified above.
- # Examples: [80, 8000, 1312]
- # Default: 80
- port: 80
+# Int. Port to listen for letsencrypt certificate challenges on.
+# If letsencrypt is enabled, this port must be reachable or you won't be able to obtain certs.
+# If letsencrypt is disabled, this port will not be used.
+# This *must not* be the same as the webserver/API port specified above.
+# Examples: [80, 8000, 1312]
+# Default: 80
+letsencrypt-port: 80
- # String. Directory in which to store LetsEncrypt certificates.
- # It is a good move to make this a sub-path within your storage directory, as it makes
- # backup easier, but you might wish to move them elsewhere if they're also accessed by other services.
- # In any case, make sure GoToSocial has permissions to write to / read from this directory.
- # Examples: ["/home/gotosocial/storage/certs", "/acmecerts"]
- # Default: "/gotosocial/storage/certs"
- certDir: "/gotosocial/storage/certs"
+# String. Directory in which to store LetsEncrypt certificates.
+# It is a good move to make this a sub-path within your storage directory, as it makes
+# backup easier, but you might wish to move them elsewhere if they're also accessed by other services.
+# In any case, make sure GoToSocial has permissions to write to / read from this directory.
+# Examples: ["/home/gotosocial/storage/certs", "/acmecerts"]
+# Default: "/gotosocial/storage/certs"
+letsencrypt-cert-dir: "/gotosocial/storage/certs"
- # String. Email address to use when registering LetsEncrypt certs.
- # Most likely, this will be the email address of the instance administrator.
- # LetsEncrypt will send notifications about expiring certificates etc to this address.
- # Examples: ["admin@example.org"]
- # Default: ""
- emailAddress: ""
+# String. Email address to use when registering LetsEncrypt certs.
+# Most likely, this will be the email address of the instance administrator.
+# LetsEncrypt will send notifications about expiring certificates etc to this address.
+# Examples: ["admin@example.org"]
+# Default: ""
+letsencrypt-email-address: ""
```
diff --git a/docs/configuration/media.md b/docs/configuration/media.md
index 2c5e778f7..393274726 100644
--- a/docs/configuration/media.md
+++ b/docs/configuration/media.md
@@ -8,25 +8,24 @@
########################
# Config pertaining to user media uploads (videos, image, image descriptions).
-media:
- # Int. Maximum allowed image upload size in bytes.
- # Examples: [2097152, 10485760]
- # Default: 2097152 -- aka 2MB
- maxImageSize: 2097152
+# Int. Maximum allowed image upload size in bytes.
+# Examples: [2097152, 10485760]
+# Default: 2097152 -- aka 2MB
+media-image-max-size: 2097152
- # Int. Maximum allowed video upload size in bytes.
- # Examples: [2097152, 10485760]
- # Default: 10485760 -- aka 10MB
- maxVideoSize: 10485760
+# Int. Maximum allowed video upload size in bytes.
+# Examples: [2097152, 10485760]
+# Default: 10485760 -- aka 10MB
+media-video-max-size: 10485760
- # Int. Minimum amount of characters required as an image or video description.
- # Examples: [500, 1000, 1500]
- # Default: 0 (not required)
- minDescriptionChars: 0
+# Int. Minimum amount of characters required as an image or video description.
+# Examples: [500, 1000, 1500]
+# Default: 0 (not required)
+media-description-min-chars: 0
- # Int. Maximum amount of characters permitted in an image or video description.
- # Examples: [500, 1000, 1500]
- # Default: 500
- maxDescriptionChars: 500
+# Int. Maximum amount of characters permitted in an image or video description.
+# Examples: [500, 1000, 1500]
+# Default: 500
+media-description-max-chars: 500
```
diff --git a/docs/configuration/oidc.md b/docs/configuration/oidc.md
index 6c6bbe94b..e373e48d9 100644
--- a/docs/configuration/oidc.md
+++ b/docs/configuration/oidc.md
@@ -20,55 +20,54 @@ GoToSocial exposes the following configuration settings for OIDC, shown below wi
#######################
# Config for authentication with an external OIDC provider (Dex, Google, Auth0, etc).
-oidc:
- # Bool. Enable authentication with external OIDC provider. If set to true, then
- # the other OIDC options must be set as well. If this is set to false, then the standard
- # internal oauth flow will be used, where users sign in to GtS with username/password.
- # Options: [true, false]
- # Default: false
- enabled: false
+# Bool. Enable authentication with external OIDC provider. If set to true, then
+# the other OIDC options must be set as well. If this is set to false, then the standard
+# internal oauth flow will be used, where users sign in to GtS with username/password.
+# Options: [true, false]
+# Default: false
+oidc-enabled: false
- # String. Name of the oidc idp (identity provider). This will be shown to users when
- # they log in.
- # Examples: ["Google", "Dex", "Auth0"]
- # Default: ""
- idpName: ""
+# String. Name of the oidc idp (identity provider). This will be shown to users when
+# they log in.
+# Examples: ["Google", "Dex", "Auth0"]
+# Default: ""
+oidc-idp-name: ""
- # Bool. Skip the normal verification flow of tokens returned from the OIDC provider, ie.,
- # don't check the expiry or signature. This should only be used in debugging or testing,
- # never ever in a production environment as it's extremely unsafe!
- # Options: [true, false]
- # Default: false
- skipVerification: false
+# Bool. Skip the normal verification flow of tokens returned from the OIDC provider, ie.,
+# don't check the expiry or signature. This should only be used in debugging or testing,
+# never ever in a production environment as it's extremely unsafe!
+# Options: [true, false]
+# Default: false
+oidc-skip-verification: false
- # String. The OIDC issuer URI. This is where GtS will redirect users to for login.
- # Typically this will look like a standard web URL.
- # Examples: ["https://auth.example.org", "https://example.org/auth"]
- # Default: ""
- issuer: ""
+# String. The OIDC issuer URI. This is where GtS will redirect users to for login.
+# Typically this will look like a standard web URL.
+# Examples: ["https://auth.example.org", "https://example.org/auth"]
+# Default: ""
+oidc-issuer: ""
- # String. The ID for this client as registered with the OIDC provider.
- # Examples: ["some-client-id", "fda3772a-ad35-41c9-9a59-f1943ad18f54"]
- # Default: ""
- clientID: ""
+# String. The ID for this client as registered with the OIDC provider.
+# Examples: ["some-client-id", "fda3772a-ad35-41c9-9a59-f1943ad18f54"]
+# Default: ""
+oidc-client-id: ""
- # String. The secret for this client as registered with the OIDC provider.
- # Examples: ["super-secret-business", "79379cf5-8057-426d-bb83-af504d98a7b0"]
- # Default: ""
- clientSecret: ""
+# String. The secret for this client as registered with the OIDC provider.
+# Examples: ["super-secret-business", "79379cf5-8057-426d-bb83-af504d98a7b0"]
+# Default: ""
+oidc-client-secret: ""
- # Array of string. Scopes to request from the OIDC provider. The returned values will be used to
- # populate users created in GtS as a result of the authentication flow. 'openid' and 'email' are required.
- # 'profile' is used to extract a username for the newly created user.
- # 'groups' is optional and can be used to determine if a user is an admin (if they're in the group 'admin' or 'admins').
- # Examples: See eg., https://auth0.com/docs/scopes/openid-connect-scopes
- # Default: ["openid", "email", "profile", "groups"]
- scopes:
- - "openid"
- - "email"
- - "profile"
- - "groups"
+# Array of string. Scopes to request from the OIDC provider. The returned values will be used to
+# populate users created in GtS as a result of the authentication flow. 'openid' and 'email' are required.
+# 'profile' is used to extract a username for the newly created user.
+# 'groups' is optional and can be used to determine if a user is an admin (if they're in the group 'admin' or 'admins').
+# Examples: See eg., https://auth0.com/docs/scopes/openid-connect-scopes
+# Default: ["openid", "email", "profile", "groups"]
+oidc-scopes:
+ - "openid"
+ - "email"
+ - "profile"
+ - "groups"
```
## Behavior
diff --git a/docs/configuration/smtp.md b/docs/configuration/smtp.md
index d191be3c8..b87847abd 100644
--- a/docs/configuration/smtp.md
+++ b/docs/configuration/smtp.md
@@ -16,32 +16,35 @@ The configuration options for smtp are as follows:
#######################
# Config for sending emails via an smtp server. See https://en.wikipedia.org/wiki/Simple_Mail_Transfer_Protocol
-smtp:
- # String. The hostname of the smtp server you want to use.
- # If this is not set, smtp will not be used to send emails, and you can ignore the other settings.
- # Examples: ["mail.example.org", "localhost"]
- # Default: ""
- host: ""
- # Int. Port to use to connect to the smtp server.
- # Examples: []
- # Default: 0
- port: 0
- # String. Username to use when authenticating with the smtp server.
- # This should have been provided to you by your smtp host.
- # This is often, but not always, an email address.
- # Examples: ["maillord@example.org"]
- # Default: ""
- username:
- # String. Password to use when authenticating with the smtp server.
- # This should have been provided to you by your smtp host.
- # Examples: ["1234", "password"]
- # Default: ""
- password:
- # String. 'From' address for sent emails.
- # Examples: ["mail@example.org"]
- # Default: ""
- from: ""
+# String. The hostname of the smtp server you want to use.
+# If this is not set, smtp will not be used to send emails, and you can ignore the other settings.
+# Examples: ["mail.example.org", "localhost"]
+# Default: ""
+smtp-host: ""
+
+# Int. Port to use to connect to the smtp server.
+# Examples: []
+# Default: 0
+smtp-port: 0
+
+# String. Username to use when authenticating with the smtp server.
+# This should have been provided to you by your smtp host.
+# This is often, but not always, an email address.
+# Examples: ["maillord@example.org"]
+# Default: ""
+smtp-username: ""
+
+# String. Password to use when authenticating with the smtp server.
+# This should have been provided to you by your smtp host.
+# Examples: ["1234", "password"]
+# Default: ""
+smtp-password: ""
+
+# String. 'From' address for sent emails.
+# Examples: ["mail@example.org"]
+# Default: ""
+smtp-from: ""
```
Note that if you don't set `smtp-host`, then email sending via smtp will be disabled, and the other settings will be ignored. GoToSocial will still log (at trace level) emails that *would* have been sent if smtp was enabled.
diff --git a/docs/configuration/statuses.md b/docs/configuration/statuses.md
index 606776a83..e0348dd5a 100644
--- a/docs/configuration/statuses.md
+++ b/docs/configuration/statuses.md
@@ -3,43 +3,39 @@
## Settings
```yaml
-##########################
-##### STORAGE CONFIG #####
-##########################
+###########################
+##### STATUSES CONFIG #####
+###########################
-# Config pertaining to storage of user-created uploads (videos, images, etc).
-storage:
+# Config pertaining to the creation of statuses/posts, and permitted limits.
- # String. Type of storage backend to use.
- # Examples: ["local", "s3"]
- # Default: "local" (storage on local disk)
- # NOTE: s3 storage is not yet supported!
- backend: "local"
+# Int. Maximum amount of characters permitted for a new status.
+# Note that going way higher than the default might break federation.
+# Examples: [140, 500, 5000]
+# Default: 5000
+statuses-max-chars: 5000
- # String. Directory to use as a base path for storing files.
- # Make sure whatever user/group gotosocial is running as has permission to access
- # this directly, and create new subdirectories and files with in.
- # Examples: ["/home/gotosocial/storage", "/opt/gotosocial/datastorage"]
- # Default: "/gotosocial/storage"
- basePath: "/gotosocial/storage"
+# Int. Maximum amount of characters allowed in the CW/subject header of a status.
+# Note that going way higher than the default might break federation.
+# Examples: [100, 200]
+# Default: 100
+statuses-cw-max-chars: 100
- # String. Protocol to use for serving stored files.
- # It's very unlikely that you'll need to change this ever, but there might be edge cases.
- # Examples: ["http", "https"]
- serveProtocol: "https"
+# Int. Maximum amount of options to permit when creating a new poll.
+# Note that going way higher than the default might break federation.
+# Examples: [4, 6, 10]
+# Default: 6
+statuses-poll-max-options: 6
- # String. Host for serving stored files.
- # If you're using local storage, this should be THE SAME as the value you've set for Host, above.
- # It should only be a different value if you're serving stored files from a host
- # other than the one your instance is running on.
- # Examples: ["localhost", "example.org"]
- # Default: "localhost" -- you should absolutely change this.
- serveHost: "localhost"
+# Int. Maximum amount of characters to permit per poll option when creating a new poll.
+# Note that going way higher than the default might break federation.
+# Examples: [50, 100, 150]
+# Default: 50
+statuses-poll-option-max-chars: 50
- # String. Base path for serving stored files. This will be added to serveHost and serveProtocol
- # to form the prefix url of your stored files. Eg., https://example.org/fileserver/.....
- # It's unlikely that you will need to change this.
- # Examples: ["/fileserver", "/media"]
- # Default: "/fileserver"
- serveBasePath: "/fileserver"
+# Int. Maximum amount of media files that can be attached to a new status.
+# Note that going way higher than the default might break federation.
+# Examples: [4, 6, 10]
+# Default: 6
+statuses-media-max-files: 6
```
diff --git a/docs/configuration/storage.md b/docs/configuration/storage.md
index f62d59581..b0fadcd4c 100644
--- a/docs/configuration/storage.md
+++ b/docs/configuration/storage.md
@@ -8,38 +8,37 @@
##########################
# Config pertaining to storage of user-created uploads (videos, images, etc).
-storage:
- # String. Type of storage backend to use.
- # Examples: ["local", "s3"]
- # Default: "local" (storage on local disk)
- # NOTE: s3 storage is not yet supported!
- backend: "local"
+# String. Type of storage backend to use.
+# Examples: ["local", "s3"]
+# Default: "local" (storage on local disk)
+# NOTE: s3 storage is not yet supported!
+storage-backend: "local"
- # String. Directory to use as a base path for storing files.
- # Make sure whatever user/group gotosocial is running as has permission to access
- # this directly, and create new subdirectories and files with in.
- # Examples: ["/home/gotosocial/storage", "/opt/gotosocial/datastorage"]
- # Default: "/gotosocial/storage"
- basePath: "/gotosocial/storage"
+# String. Directory to use as a base path for storing files.
+# Make sure whatever user/group gotosocial is running as has permission to access
+# this directory, and to create new subdirectories and files within it.
+# Examples: ["/home/gotosocial/storage", "/opt/gotosocial/datastorage"]
+# Default: "/gotosocial/storage"
+storage-base-path: "/gotosocial/storage"
- # String. Protocol to use for serving stored files.
- # It's very unlikely that you'll need to change this ever, but there might be edge cases.
- # Examples: ["http", "https"]
- serveProtocol: "https"
+# String. Protocol to use for serving stored files.
+# It's very unlikely that you'll need to change this ever, but there might be edge cases.
+# Examples: ["http", "https"]
+storage-serve-protocol: "https"
- # String. Host for serving stored files.
- # If you're using local storage, this should be THE SAME as the value you've set for Host, above.
- # It should only be a different value if you're serving stored files from a host
- # other than the one your instance is running on.
- # Examples: ["localhost", "example.org"]
- # Default: "localhost" -- you should absolutely change this.
- serveHost: "localhost"
+# String. Host for serving stored files.
+# If you're using local storage, this should be THE SAME as the value you've set for Host, above.
+# It should only be a different value if you're serving stored files from a host
+# other than the one your instance is running on.
+# Examples: ["localhost", "example.org"]
+# Default: "localhost" -- you should absolutely change this.
+storage-serve-host: "localhost"
- # String. Base path for serving stored files. This will be added to serveHost and serveProtocol
- # to form the prefix url of your stored files. Eg., https://example.org/fileserver/.....
- # It's unlikely that you will need to change this.
- # Examples: ["/fileserver", "/media"]
- # Default: "/fileserver"
- serveBasePath: "/fileserver"
-```
\ No newline at end of file
+# String. Base path for serving stored files. This will be added to serveHost and serveProtocol
+# to form the prefix url of your stored files. Eg., https://example.org/fileserver/.....
+# It's unlikely that you will need to change this.
+# Examples: ["/fileserver", "/media"]
+# Default: "/fileserver"
+storage-serve-base-path: "/fileserver"
+```
diff --git a/docs/configuration/template.md b/docs/configuration/template.md
deleted file mode 100644
index 2c0fd5a61..000000000
--- a/docs/configuration/template.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Template
-
-## Settings
-
-```yaml
-###############################
-##### WEB TEMPLATE CONFIG #####
-###############################
-
-# Config pertaining to templating of web pages/email notifications and the like
-template:
-
- # String. Directory from which gotosocial will attempt to load html templates (.tmpl files).
- # Examples: ["/some/absolute/path/", "./relative/path/", "../../some/weird/path/"]
- # Default: "./web/template/"
- baseDir: "./web/template/"
-
- # String. Directory from which gotosocial will attempt to serve static web assets (images, scripts).
- # Examples: ["/some/absolute/path/", "./relative/path/", "../../some/weird/path/"]
- # Default: "./web/assets/"
- assetBaseDir: "./web/assets/"
-```
diff --git a/docs/configuration/web.md b/docs/configuration/web.md
new file mode 100644
index 000000000..99533a287
--- /dev/null
+++ b/docs/configuration/web.md
@@ -0,0 +1,21 @@
+# Web
+
+## Settings
+
+```yaml
+######################
+##### WEB CONFIG #####
+######################
+
+# Config pertaining to templating and serving of web pages/email notifications and the like
+
+# String. Directory from which gotosocial will attempt to load html templates (.tmpl files).
+# Examples: ["/some/absolute/path/", "./relative/path/", "../../some/weird/path/"]
+# Default: "./web/template/"
+web-template-base-dir: "./web/template/"
+
+# String. Directory from which gotosocial will attempt to serve static web assets (images, scripts).
+# Examples: ["/some/absolute/path/", "./relative/path/", "../../some/weird/path/"]
+# Default: "./web/assets/"
+web-asset-base-dir: "./web/assets/"
+```
diff --git a/docs/installation_guide/binary.md b/docs/installation_guide/binary.md
index 7668999d3..576dc52b3 100644
--- a/docs/installation_guide/binary.md
+++ b/docs/installation_guide/binary.md
@@ -63,7 +63,7 @@ You can now run the binary.
Start the GoToSocial server with the following command:
```bash
-./gotosocial --config-path ./config.yaml server start
+./gotosocial server start --config-path ./config.yaml
```
The server should now start up and you should be able to access the splash page by navigating to your domain in the browser. Note that it might take up to a minute or so for your LetsEncrypt certificates to be created for the first time, so refresh a few times if necessary.
@@ -77,7 +77,7 @@ You can use the GoToSocial binary to also create, confirm, and promote your user
Run the following command to create a new account:
```bash
-./gotosocial --config-path ./config.yaml admin account create --username some_username --email some_email@whatever.org --password SOME_PASSWORD
+./gotosocial admin account create --config-path ./config.yaml --username some_username --email some_email@whatever.org --password SOME_PASSWORD
```
In the above command, replace `some_username` with your desired username, `some_email@whatever.org` with the email address you want to associate with your account, and `SOME_PASSWORD` with a secure password.
@@ -85,7 +85,7 @@ In the above command, replace `some_username` with your desired username, `some_
Run the following command to confirm the account you just created:
```bash
-./gotosocial --config-path ./config.yaml admin account confirm --username some_username
+./gotosocial admin account confirm --config-path ./config.yaml --username some_username
```
Replace `some_username` with the username of the account you just created.
@@ -93,7 +93,7 @@ Replace `some_username` with the username of the account you just created.
If you want your user to have admin rights, you can promote them using a similar command:
```bash
-./gotosocial --config-path ./config.yaml admin account promote --username some_username
+./gotosocial admin account promote --config-path ./config.yaml --username some_username
```
Replace `some_username` with the username of the account you just created.
diff --git a/docs/installation_guide/index.md b/docs/installation_guide/index.md
index 34b7903b1..b28deb7ff 100644
--- a/docs/installation_guide/index.md
+++ b/docs/installation_guide/index.md
@@ -16,7 +16,7 @@ If you decide to use a VPS instead, you can just spin yourself up something chea
[Greenhost](https://greenhost.net) is also great: it has zero co2 emissions, but is a bit more costly.
-## Ports
+## Ports
The installation guides won't go into running [UFW](https://www.digitalocean.com/community/tutorials/how-to-set-up-a-firewall-with-ufw-on-ubuntu-18-04) and [Fail2Ban](https://linuxize.com/post/install-configure-fail2ban-on-ubuntu-20-04/) but you absolutely should do that.
diff --git a/example/config.yaml b/example/config.yaml
index 2295acf91..4999b10f4 100644
--- a/example/config.yaml
+++ b/example/config.yaml
@@ -21,12 +21,12 @@
# String. Log level to use throughout the application. Must be lower-case.
# Options: ["trace","debug","info","warn","error","fatal"]
# Default: "info"
-logLevel: "info"
+log-level: "info"
# String. Application name to use internally.
# Examples: ["My Application","gotosocial"]
# Default: "gotosocial"
-applicationName: "gotosocial"
+application-name: "gotosocial"
# String. Hostname that this server will be reachable at. Defaults to localhost for local testing,
# but you should *definitely* change this when running for real, or your server won't work at all.
@@ -45,7 +45,7 @@ host: "localhost"
# DO NOT change this after your server has already run once, or you will break things!
# Examples: ["example.org","server.com"]
# Default: ""
-accountDomain: ""
+account-domain: ""
# String. Protocol to use for the server. Only change to http for local testing!
# This should be the protocol part of the URI that your server is actually reachable on. So even if you're
@@ -62,7 +62,7 @@ protocol: "https"
# you have specific networking requirements.
# Examples: ["0.0.0.0", "172.128.0.16", "localhost", "[::]", "[2001:db8::fed1]"]
# Default: "0.0.0.0"
-bindAddress: "0.0.0.0"
+bind-address: "0.0.0.0"
# Int. Listen port for the GoToSocial webserver + API. If you're running behind a reverse proxy and/or in a docker,
# container, just set this to whatever you like (or leave the default), and make sure it's forwarded properly.
@@ -78,7 +78,7 @@ port: 8080
# or the gateway of the docker network, and/or the address of the reverse proxy (if it's not running on the host network).
# Example: ["127.0.0.1/32", "172.20.0.1"]
# Default: ["127.0.0.1/32"] (localhost)
-trustedProxies:
+trusted-proxies:
- "127.0.0.1/32"
############################
@@ -86,318 +86,313 @@ trustedProxies:
############################
# Config pertaining to the Gotosocial database connection
-db:
- # String. Database type.
- # Options: ["postgres","sqlite"]
- # Default: "postgres"
- type: "postgres"
+# String. Database type.
+# Options: ["postgres","sqlite"]
+# Default: "postgres"
+db-type: "postgres"
- # String. Database address or parameters.
- # Examples: ["localhost","my.db.host","127.0.0.1","192.111.39.110",":memory:"]
- # Default: "localhost"
- address: "127.0.0.1"
+# String. Database address or parameters.
+# Examples: ["localhost","my.db.host","127.0.0.1","192.111.39.110",":memory:"]
+# Default: "localhost"
+db-address: "127.0.0.1"
- # Int. Port for database connection.
- # Examples: [5432, 1234, 6969]
- # Default: 5432
- port: 5432
+# Int. Port for database connection.
+# Examples: [5432, 1234, 6969]
+# Default: 5432
+db-port: 5432
- # String. Username for the database connection.
- # Examples: ["mydbuser","postgres","gotosocial"]
- # Default: "postgres"
- user: "postgres"
+# String. Username for the database connection.
+# Examples: ["mydbuser","postgres","gotosocial"]
+# Default: "postgres"
+db-user: "postgres"
- # REQUIRED
- # String. Password to use for the database connection
- # Examples: ["password123","verysafepassword","postgres"]
- # Default: "postgres"
- password: "postgres"
+# REQUIRED
+# String. Password to use for the database connection
+# Examples: ["password123","verysafepassword","postgres"]
+# Default: "postgres"
+db-password: "postgres"
- # String. Name of the database to use within the provided database type.
- # Examples: ["mydb","postgres","gotosocial"]
- # Default: "postgres"
- database: "postgres"
+# String. Name of the database to use within the provided database type.
+# Examples: ["mydb","postgres","gotosocial"]
+# Default: "postgres"
+db-database: "postgres"
- # String. Disable, enable, or require SSL/TLS connection to the database.
- # If "disable" then no TLS connection will be attempted.
- # If "enable" then TLS will be tried, but the database certificate won't be checked (for self-signed certs).
- # If "require" then TLS will be required to make a connection, and a valid certificate must be presented.
- # Options: ["disable", "enable", "require"]
- # Default: "disable"
- tlsMode: "disable"
+# String. Disable, enable, or require SSL/TLS connection to the database.
+# If "disable" then no TLS connection will be attempted.
+# If "enable" then TLS will be tried, but the database certificate won't be checked (for self-signed certs).
+# If "require" then TLS will be required to make a connection, and a valid certificate must be presented.
+# Options: ["disable", "enable", "require"]
+# Default: "disable"
+db-tls-mode: "disable"
- # String. Path to a CA certificate on the host machine for db certificate validation.
- # If this is left empty, just the host certificates will be used.
- # If filled in, the certificate will be loaded and added to host certificates.
- # Examples: ["/path/to/some/cert.crt"]
- # Default: ""
- tlsCACert: ""
+# String. Path to a CA certificate on the host machine for db certificate validation.
+# If this is left empty, just the host certificates will be used.
+# If filled in, the certificate will be loaded and added to host certificates.
+# Examples: ["/path/to/some/cert.crt"]
+# Default: ""
+db-tls-ca-cert: ""
-###############################
-##### WEB TEMPLATE CONFIG #####
-###############################
+######################
+##### WEB CONFIG #####
+######################
-# Config pertaining to templating of web pages/email notifications and the like
-template:
+# Config pertaining to templating and serving of web pages/email notifications and the like
- # String. Directory from which gotosocial will attempt to load html templates (.tmpl files).
- # Examples: ["/some/absolute/path/", "./relative/path/", "../../some/weird/path/"]
- # Default: "./web/template/"
- baseDir: "./web/template/"
+# String. Directory from which gotosocial will attempt to load html templates (.tmpl files).
+# Examples: ["/some/absolute/path/", "./relative/path/", "../../some/weird/path/"]
+# Default: "./web/template/"
+web-template-base-dir: "./web/template/"
- # String. Directory from which gotosocial will attempt to serve static web assets (images, scripts).
- # Examples: ["/some/absolute/path/", "./relative/path/", "../../some/weird/path/"]
- # Default: "./web/assets/"
- assetBaseDir: "./web/assets/"
+# String. Directory from which gotosocial will attempt to serve static web assets (images, scripts).
+# Examples: ["/some/absolute/path/", "./relative/path/", "../../some/weird/path/"]
+# Default: "./web/assets/"
+web-asset-base-dir: "./web/assets/"
###########################
##### ACCOUNTS CONFIG #####
###########################
# Config pertaining to creation and maintenance of accounts on the server, as well as defaults for new accounts.
-accounts:
- # Bool. Do we want people to be able to just submit sign up requests, or do we want invite only?
- # Options: [true, false]
- # Default: true
- openRegistration: true
+# Bool. Do we want people to be able to just submit sign up requests, or do we want invite only?
+# Options: [true, false]
+# Default: true
+accounts-registration-open: true
- # Bool. Do sign up requests require approval from an admin/moderator before an account can sign in/use the server?
- # Options: [true, false]
- # Default: true
- requireApproval: true
+# Bool. Do sign up requests require approval from an admin/moderator before an account can sign in/use the server?
+# Options: [true, false]
+# Default: true
+accounts-approval-required: true
- # Bool. Are sign up requests required to submit a reason for the request (eg., an explanation of why they want to join the instance)?
- # Options: [true, false]
- # Default: true
- reasonRequired: true
+# Bool. Are sign up requests required to submit a reason for the request (eg., an explanation of why they want to join the instance)?
+# Options: [true, false]
+# Default: true
+accounts-reason-required: true
########################
##### MEDIA CONFIG #####
########################
# Config pertaining to user media uploads (videos, image, image descriptions).
-media:
- # Int. Maximum allowed image upload size in bytes.
- # Examples: [2097152, 10485760]
- # Default: 2097152 -- aka 2MB
- maxImageSize: 2097152
+# Int. Maximum allowed image upload size in bytes.
+# Examples: [2097152, 10485760]
+# Default: 2097152 -- aka 2MB
+media-image-max-size: 2097152
- # Int. Maximum allowed video upload size in bytes.
- # Examples: [2097152, 10485760]
- # Default: 10485760 -- aka 10MB
- maxVideoSize: 10485760
+# Int. Maximum allowed video upload size in bytes.
+# Examples: [2097152, 10485760]
+# Default: 10485760 -- aka 10MB
+media-video-max-size: 10485760
- # Int. Minimum amount of characters required as an image or video description.
- # Examples: [500, 1000, 1500]
- # Default: 0 (not required)
- minDescriptionChars: 0
+# Int. Minimum amount of characters required as an image or video description.
+# Examples: [500, 1000, 1500]
+# Default: 0 (not required)
+media-description-min-chars: 0
- # Int. Maximum amount of characters permitted in an image or video description.
- # Examples: [500, 1000, 1500]
- # Default: 500
- maxDescriptionChars: 500
+# Int. Maximum amount of characters permitted in an image or video description.
+# Examples: [500, 1000, 1500]
+# Default: 500
+media-description-max-chars: 500
##########################
##### STORAGE CONFIG #####
##########################
# Config pertaining to storage of user-created uploads (videos, images, etc).
-storage:
- # String. Type of storage backend to use.
- # Examples: ["local", "s3"]
- # Default: "local" (storage on local disk)
- # NOTE: s3 storage is not yet supported!
- backend: "local"
+# String. Type of storage backend to use.
+# Examples: ["local", "s3"]
+# Default: "local" (storage on local disk)
+# NOTE: s3 storage is not yet supported!
+storage-backend: "local"
- # String. Directory to use as a base path for storing files.
- # Make sure whatever user/group gotosocial is running as has permission to access
- # this directly, and create new subdirectories and files with in.
- # Examples: ["/home/gotosocial/storage", "/opt/gotosocial/datastorage"]
- # Default: "/gotosocial/storage"
- basePath: "/gotosocial/storage"
+# String. Directory to use as a base path for storing files.
+# Make sure whatever user/group gotosocial is running as has permission to access
+# this directory, and to create new subdirectories and files within it.
+# Examples: ["/home/gotosocial/storage", "/opt/gotosocial/datastorage"]
+# Default: "/gotosocial/storage"
+storage-base-path: "/gotosocial/storage"
- # String. Protocol to use for serving stored files.
- # It's very unlikely that you'll need to change this ever, but there might be edge cases.
- # Examples: ["http", "https"]
- serveProtocol: "https"
+# String. Protocol to use for serving stored files.
+# It's very unlikely that you'll need to change this ever, but there might be edge cases.
+# Examples: ["http", "https"]
+storage-serve-protocol: "https"
- # String. Host for serving stored files.
- # If you're using local storage, this should be THE SAME as the value you've set for Host, above.
- # It should only be a different value if you're serving stored files from a host
- # other than the one your instance is running on.
- # Examples: ["localhost", "example.org"]
- # Default: "localhost" -- you should absolutely change this.
- serveHost: "localhost"
+# String. Host for serving stored files.
+# If you're using local storage, this should be THE SAME as the value you've set for Host, above.
+# It should only be a different value if you're serving stored files from a host
+# other than the one your instance is running on.
+# Examples: ["localhost", "example.org"]
+# Default: "localhost" -- you should absolutely change this.
+storage-serve-host: "localhost"
- # String. Base path for serving stored files. This will be added to serveHost and serveProtocol
- # to form the prefix url of your stored files. Eg., https://example.org/fileserver/.....
- # It's unlikely that you will need to change this.
- # Examples: ["/fileserver", "/media"]
- # Default: "/fileserver"
- serveBasePath: "/fileserver"
+# String. Base path for serving stored files. This will be added to serveHost and serveProtocol
+# to form the prefix url of your stored files. Eg., https://example.org/fileserver/.....
+# It's unlikely that you will need to change this.
+# Examples: ["/fileserver", "/media"]
+# Default: "/fileserver"
+storage-serve-base-path: "/fileserver"
###########################
##### STATUSES CONFIG #####
###########################
# Config pertaining to the creation of statuses/posts, and permitted limits.
-statuses:
- # Int. Maximum amount of characters permitted for a new status.
- # Note that going way higher than the default might break federation.
- # Examples: [140, 500, 5000]
- # Default: 5000
- maxChars: 5000
+# Int. Maximum amount of characters permitted for a new status.
+# Note that going way higher than the default might break federation.
+# Examples: [140, 500, 5000]
+# Default: 5000
+statuses-max-chars: 5000
- # Int. Maximum amount of characters allowed in the CW/subject header of a status.
- # Note that going way higher than the default might break federation.
- # Examples: [100, 200]
- # Default: 100
- cwMaxChars: 100
+# Int. Maximum amount of characters allowed in the CW/subject header of a status.
+# Note that going way higher than the default might break federation.
+# Examples: [100, 200]
+# Default: 100
+statuses-cw-max-chars: 100
- # Int. Maximum amount of options to permit when creating a new poll.
- # Note that going way higher than the default might break federation.
- # Examples: [4, 6, 10]
- # Default: 6
- pollMaxOptions: 6
+# Int. Maximum amount of options to permit when creating a new poll.
+# Note that going way higher than the default might break federation.
+# Examples: [4, 6, 10]
+# Default: 6
+statuses-poll-max-options: 6
- # Int. Maximum amount of characters to permit per poll option when creating a new poll.
- # Note that going way higher than the default might break federation.
- # Examples: [50, 100, 150]
- # Default: 50
- pollOptionMaxChars: 50
+# Int. Maximum amount of characters to permit per poll option when creating a new poll.
+# Note that going way higher than the default might break federation.
+# Examples: [50, 100, 150]
+# Default: 50
+statuses-poll-option-max-chars: 50
- # Int. Maximum amount of media files that can be attached to a new status.
- # Note that going way higher than the default might break federation.
- # Examples: [4, 6, 10]
- # Default: 6
- maxMediaFiles: 6
+# Int. Maximum amount of media files that can be attached to a new status.
+# Note that going way higher than the default might break federation.
+# Examples: [4, 6, 10]
+# Default: 6
+statuses-media-max-files: 6
##############################
##### LETSENCRYPT CONFIG #####
##############################
# Config pertaining to the automatic acquisition and use of LetsEncrypt HTTPS certificates.
-letsEncrypt:
- # Bool. Whether or not letsencrypt should be enabled for the server.
- # If false, the rest of the settings here will be ignored.
- # You should only change this if you want to serve GoToSocial behind a reverse proxy
- # like Traefik, HAProxy, or Nginx.
- # Options: [true, false]
- # Default: true
- enabled: true
+# Bool. Whether or not letsencrypt should be enabled for the server.
+# If false, the rest of the settings here will be ignored.
+# You should only change this if you want to serve GoToSocial behind a reverse proxy
+# like Traefik, HAProxy, or Nginx.
+# Options: [true, false]
+# Default: true
+letsencrypt-enabled: true
- # Int. Port to listen for letsencrypt certificate challenges on.
- # If letsencrypt is enabled, this port must be reachable or you won't be able to obtain certs.
- # If letsencrypt is disabled, this port will not be used.
- # This *must not* be the same as the webserver/API port specified above.
- # Examples: [80, 8000, 1312]
- # Default: 80
- port: 80
+# Int. Port to listen for letsencrypt certificate challenges on.
+# If letsencrypt is enabled, this port must be reachable or you won't be able to obtain certs.
+# If letsencrypt is disabled, this port will not be used.
+# This *must not* be the same as the webserver/API port specified above.
+# Examples: [80, 8000, 1312]
+# Default: 80
+letsencrypt-port: 80
- # String. Directory in which to store LetsEncrypt certificates.
- # It is a good move to make this a sub-path within your storage directory, as it makes
- # backup easier, but you might wish to move them elsewhere if they're also accessed by other services.
- # In any case, make sure GoToSocial has permissions to write to / read from this directory.
- # Examples: ["/home/gotosocial/storage/certs", "/acmecerts"]
- # Default: "/gotosocial/storage/certs"
- certDir: "/gotosocial/storage/certs"
+# String. Directory in which to store LetsEncrypt certificates.
+# It is a good move to make this a sub-path within your storage directory, as it makes
+# backup easier, but you might wish to move them elsewhere if they're also accessed by other services.
+# In any case, make sure GoToSocial has permissions to write to / read from this directory.
+# Examples: ["/home/gotosocial/storage/certs", "/acmecerts"]
+# Default: "/gotosocial/storage/certs"
+letsencrypt-cert-dir: "/gotosocial/storage/certs"
- # String. Email address to use when registering LetsEncrypt certs.
- # Most likely, this will be the email address of the instance administrator.
- # LetsEncrypt will send notifications about expiring certificates etc to this address.
- # Examples: ["admin@example.org"]
- # Default: ""
- emailAddress: ""
+# String. Email address to use when registering LetsEncrypt certs.
+# Most likely, this will be the email address of the instance administrator.
+# LetsEncrypt will send notifications about expiring certificates etc to this address.
+# Examples: ["admin@example.org"]
+# Default: ""
+letsencrypt-email-address: ""
#######################
##### OIDC CONFIG #####
#######################
# Config for authentication with an external OIDC provider (Dex, Google, Auth0, etc).
-oidc:
- # Bool. Enable authentication with external OIDC provider. If set to true, then
- # the other OIDC options must be set as well. If this is set to false, then the standard
- # internal oauth flow will be used, where users sign in to GtS with username/password.
- # Options: [true, false]
- # Default: false
- enabled: false
+# Bool. Enable authentication with external OIDC provider. If set to true, then
+# the other OIDC options must be set as well. If this is set to false, then the standard
+# internal oauth flow will be used, where users sign in to GtS with username/password.
+# Options: [true, false]
+# Default: false
+oidc-enabled: false
- # String. Name of the oidc idp (identity provider). This will be shown to users when
- # they log in.
- # Examples: ["Google", "Dex", "Auth0"]
- # Default: ""
- idpName: ""
+# String. Name of the oidc idp (identity provider). This will be shown to users when
+# they log in.
+# Examples: ["Google", "Dex", "Auth0"]
+# Default: ""
+oidc-idp-name: ""
- # Bool. Skip the normal verification flow of tokens returned from the OIDC provider, ie.,
- # don't check the expiry or signature. This should only be used in debugging or testing,
- # never ever in a production environment as it's extremely unsafe!
- # Options: [true, false]
- # Default: false
- skipVerification: false
+# Bool. Skip the normal verification flow of tokens returned from the OIDC provider, ie.,
+# don't check the expiry or signature. This should only be used in debugging or testing,
+# never ever in a production environment as it's extremely unsafe!
+# Options: [true, false]
+# Default: false
+oidc-skip-verification: false
- # String. The OIDC issuer URI. This is where GtS will redirect users to for login.
- # Typically this will look like a standard web URL.
- # Examples: ["https://auth.example.org", "https://example.org/auth"]
- # Default: ""
- issuer: ""
+# String. The OIDC issuer URI. This is where GtS will redirect users to for login.
+# Typically this will look like a standard web URL.
+# Examples: ["https://auth.example.org", "https://example.org/auth"]
+# Default: ""
+oidc-issuer: ""
- # String. The ID for this client as registered with the OIDC provider.
- # Examples: ["some-client-id", "fda3772a-ad35-41c9-9a59-f1943ad18f54"]
- # Default: ""
- clientID: ""
+# String. The ID for this client as registered with the OIDC provider.
+# Examples: ["some-client-id", "fda3772a-ad35-41c9-9a59-f1943ad18f54"]
+# Default: ""
+oidc-client-id: ""
- # String. The secret for this client as registered with the OIDC provider.
- # Examples: ["super-secret-business", "79379cf5-8057-426d-bb83-af504d98a7b0"]
- # Default: ""
- clientSecret: ""
+# String. The secret for this client as registered with the OIDC provider.
+# Examples: ["super-secret-business", "79379cf5-8057-426d-bb83-af504d98a7b0"]
+# Default: ""
+oidc-client-secret: ""
- # Array of string. Scopes to request from the OIDC provider. The returned values will be used to
- # populate users created in GtS as a result of the authentication flow. 'openid' and 'email' are required.
- # 'profile' is used to extract a username for the newly created user.
- # 'groups' is optional and can be used to determine if a user is an admin (if they're in the group 'admin' or 'admins').
- # Examples: See eg., https://auth0.com/docs/scopes/openid-connect-scopes
- # Default: ["openid", "email", "profile", "groups"]
- scopes:
- - "openid"
- - "email"
- - "profile"
- - "groups"
+# Array of string. Scopes to request from the OIDC provider. The returned values will be used to
+# populate users created in GtS as a result of the authentication flow. 'openid' and 'email' are required.
+# 'profile' is used to extract a username for the newly created user.
+# 'groups' is optional and can be used to determine if a user is an admin (if they're in the group 'admin' or 'admins').
+# Examples: See eg., https://auth0.com/docs/scopes/openid-connect-scopes
+# Default: ["openid", "email", "profile", "groups"]
+oidc-scopes:
+ - "openid"
+ - "email"
+ - "profile"
+ - "groups"
#######################
##### SMTP CONFIG #####
#######################
# Config for sending emails via an smtp server. See https://en.wikipedia.org/wiki/Simple_Mail_Transfer_Protocol
-smtp:
- # String. The hostname of the smtp server you want to use.
- # If this is not set, smtp will not be used to send emails, and you can ignore the other settings.
- # Examples: ["mail.example.org", "localhost"]
- # Default: ""
- host: ""
- # Int. Port to use to connect to the smtp server.
- # Examples: []
- # Default: 0
- port: 0
- # String. Username to use when authenticating with the smtp server.
- # This should have been provided to you by your smtp host.
- # This is often, but not always, an email address.
- # Examples: ["maillord@example.org"]
- # Default: ""
- username:
- # String. Password to use when authenticating with the smtp server.
- # This should have been provided to you by your smtp host.
- # Examples: ["1234", "password"]
- # Default: ""
- password:
- # String. 'From' address for sent emails.
- # Examples: ["mail@example.org"]
- # Default: ""
- from: ""
+# String. The hostname of the smtp server you want to use.
+# If this is not set, smtp will not be used to send emails, and you can ignore the other settings.
+# Examples: ["mail.example.org", "localhost"]
+# Default: ""
+smtp-host: ""
+
+# Int. Port to use to connect to the smtp server.
+# Examples: []
+# Default: 0
+smtp-port: 0
+
+# String. Username to use when authenticating with the smtp server.
+# This should have been provided to you by your smtp host.
+# This is often, but not always, an email address.
+# Examples: ["maillord@example.org"]
+# Default: ""
+smtp-username: ""
+
+# String. Password to use when authenticating with the smtp server.
+# This should have been provided to you by your smtp host.
+# Examples: ["1234", "password"]
+# Default: ""
+smtp-password: ""
+
+# String. 'From' address for sent emails.
+# Examples: ["mail@example.org"]
+# Default: ""
+smtp-from: ""
diff --git a/go.mod b/go.mod
index eebd87f7e..ad9ec2aa2 100644
--- a/go.mod
+++ b/go.mod
@@ -23,6 +23,9 @@ require (
github.com/oklog/ulid v1.3.1
github.com/russross/blackfriday/v2 v2.1.0
github.com/sirupsen/logrus v1.8.1
+ github.com/spf13/cobra v1.2.1
+ github.com/spf13/pflag v1.0.5
+ github.com/spf13/viper v1.9.0
github.com/stretchr/testify v1.7.0
github.com/superseriousbusiness/activity v1.0.1-0.20211113133524-56560b73ace8
github.com/superseriousbusiness/exifremove v0.0.0-20210330092427-6acd27eac203
@@ -31,12 +34,10 @@ require (
github.com/uptrace/bun v1.0.18
github.com/uptrace/bun/dialect/pgdialect v1.0.18
github.com/uptrace/bun/dialect/sqlitedialect v1.0.18
- github.com/urfave/cli/v2 v2.3.0
github.com/wagslane/go-password-validator v0.3.0
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
golang.org/x/text v0.3.7
- gopkg.in/yaml.v2 v2.4.0
modernc.org/sqlite v1.14.1
mvdan.cc/xurls/v2 v2.3.0
)
@@ -51,7 +52,6 @@ require (
codeberg.org/gruf/go-nowish v1.0.2 // indirect
codeberg.org/gruf/go-pools v1.0.2 // indirect
github.com/aymerick/douceur v0.2.0 // indirect
- github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dsoprea/go-exif v0.0.0-20210625224831-a6301f85c82b // indirect
github.com/dsoprea/go-exif/v2 v2.0.0-20210625224831-a6301f85c82b // indirect
@@ -61,6 +61,7 @@ require (
github.com/dsoprea/go-photoshop-info-format v0.0.0-20200610045659-121dd752914d // indirect
github.com/dsoprea/go-png-image-structure v0.0.0-20210512210324-29b889a6093d // indirect
github.com/dsoprea/go-utility v0.0.0-20200717064901-2fccff4aa15e // indirect
+ github.com/fsnotify/fsnotify v1.5.1 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-errors/errors v1.4.1 // indirect
github.com/go-playground/locales v0.14.0 // indirect
@@ -74,6 +75,8 @@ require (
github.com/gorilla/css v1.0.0 // indirect
github.com/gorilla/securecookie v1.1.1 // indirect
github.com/gorilla/sessions v1.2.1 // indirect
+ github.com/hashicorp/hcl v1.0.0 // indirect
+ github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
github.com/jackc/pgio v1.0.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
@@ -84,12 +87,18 @@ require (
github.com/json-iterator/go v1.1.12 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/leodido/go-urn v1.2.1 // indirect
+ github.com/magiconair/properties v1.8.5 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/pelletier/go-toml v1.9.4 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b // indirect
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
+ github.com/spf13/afero v1.6.0 // indirect
+ github.com/spf13/cast v1.4.1 // indirect
+ github.com/spf13/jwalterweatherman v1.1.0 // indirect
+ github.com/subosito/gotenv v1.2.0 // indirect
github.com/tdewolff/parse/v2 v2.5.22 // indirect
github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect
github.com/ugorji/go/codec v1.2.6 // indirect
@@ -102,7 +111,9 @@ require (
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.27.1 // indirect
+ gopkg.in/ini.v1 v1.63.2 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
lukechampine.com/uint128 v1.1.1 // indirect
modernc.org/cc/v3 v3.35.18 // indirect
diff --git a/go.sum b/go.sum
index e568b38e3..c576cb525 100644
--- a/go.sum
+++ b/go.sum
@@ -13,6 +13,16 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -21,6 +31,8 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -60,36 +72,48 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/ReneKroon/ttlcache v1.7.0 h1:8BkjFfrzVFXyrqnMtezAaJ6AHPSsVV10m6w28N/Fgkk=
github.com/ReneKroon/ttlcache v1.7.0/go.mod h1:8BGGzdumrIjWxdRx8zpK6L3oGMWvIXdvB2GD1cfvd+I=
github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU=
github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
github.com/andybalholm/brotli v1.0.0 h1:7UCwP93aiSfvWpapti8g88vVVGp2qqtGyePsSuDafo4=
github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antonlindstrom/pgstore v0.0.0-20200229204646-b08ebf1105e0/go.mod h1:2Ti6VUHVxpC0VSmTZzEvpzysnaGAfGBOoMIz5ykPyyw=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
github.com/boj/redistore v0.0.0-20180917114910-cd5dcc76aeff/go.mod h1:+RTT1BOk5P97fT2CiHkbFQwkK3mjsFAP6zCYV2aXtjw=
github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
github.com/bradleypeabody/gorilla-sessions-memcache v0.0.0-20181103040241-659414f458e1/go.mod h1:dkChI7Tbtx7H1Tj7TqGSZMOeGpMP5gLHtjroHd4agiI=
github.com/buckket/go-blurhash v1.1.0 h1:X5M6r0LIvwdvKiUtiNcRL2YlmOfMzYobI3VCKCZc9Do=
github.com/buckket/go-blurhash v1.1.0/go.mod h1:aT2iqo5W9vu9GpyoLErKfTHwgODsZp3bQfXjXJUxNb8=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/coreos/go-oidc/v3 v3.1.0 h1:6avEvcdvTa1qYsOZ6I5PRkSYHzpTNWgKYmaJfaYbrRw=
github.com/coreos/go-oidc/v3 v3.1.0/go.mod h1:rEJ/idjfUyfkBit1eI1fvyr+64/g9dcKpAm8MJMesvo=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU=
-github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/dave/jennifer v1.3.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
@@ -132,15 +156,23 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
github.com/gavv/httpexpect v2.0.0+incompatible h1:1X9kcRshkSKEjNJJxX9Y9mQ5BRfbxU5kORdjhlA1yX8=
github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/cors v1.3.1 h1:doAsuITavI4IOcd0Y19U4B+O0dNWihRyX//nn4sEmgA=
github.com/gin-contrib/cors v1.3.1/go.mod h1:jjEJ4268OPZUcU7k9Pm653S7lXUGcqMADzFA61xsmDk=
github.com/gin-contrib/sessions v0.0.4 h1:gq4fNa1Zmp564iHP5G6EBuktilEos8VKhe2sza1KMgo=
@@ -186,8 +218,10 @@ github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b h1:khEcpUM4yFcxg4
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
github.com/goccy/go-json v0.4.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.5.1/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
@@ -205,8 +239,10 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -220,7 +256,9 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -236,7 +274,9 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@@ -245,6 +285,8 @@ github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -252,12 +294,21 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
@@ -272,15 +323,46 @@ github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7Fsg
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/h2non/filetype v1.1.1/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY=
github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg=
github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imkira/go-interpol v1.1.0 h1:KIiKr0VSG2CUW1hl1jpiyuzuJeKUUpC8iM1AIE7N1Vk=
github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA=
+github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0=
github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
@@ -335,6 +417,7 @@ github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
@@ -345,6 +428,7 @@ github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kidstuff/mongostore v0.0.0-20181113001930-e650cd85ee4b/go.mod h1:g2nVr8KZVXJSS97Jo8pJ0jgq29P6H7dG0oplUA86MQw=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.10.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.10.10 h1:a/y8CglcM7gLGYmlbP/stPE5sR3hbhFRUjCBfd/0B3I=
@@ -353,7 +437,9 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kpango/fastime v1.0.16/go.mod h1:lVqUTcXmQnk1wriyvq5DElbRSRDC0XtqbXQRdz0Eo+g=
github.com/kpango/glg v1.5.8/go.mod h1:HI0g/1T4dmUhdoT2isXHrCM4FeNjc+t7fZujjvqYIeQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
@@ -372,13 +458,21 @@ github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.3 h1:v9QZf2Sn6AmjXtQeFpdoq/eaNtYP6IN+7lcrygsIAtg=
github.com/lib/pq v1.10.3/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
+github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/matryer/try v0.0.0-20161228173917-9ac251b645a2/go.mod h1:0KeJpeMD6o+O4hW7qJOT7vyQPKrWmj26uf5wMc/IiIs=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
@@ -387,12 +481,25 @@ github.com/mattn/go-sqlite3 v1.14.9/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A
github.com/memcachier/mc v2.0.1+incompatible/go.mod h1:7bkvFE61leUBvXz+yxsOnGBQSZpBSPIMUQSmmSHvuXc=
github.com/microcosm-cc/bluemonday v1.0.16 h1:kHmAq2t7WPWLjiGvzKa5o3HzSfahUKiOq7fAPUiMNIc=
github.com/microcosm-cc/bluemonday v1.0.16/go.mod h1:Z0r70sCuXHig8YpBzCc5eGHAap2K7e/u082ZUpDRRqM=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo=
github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/moul/http2curl v1.0.0 h1:dRMWoAtb+ePxMlLkrCbAqh4TlPHXvoGUSQ323/9Zahs=
@@ -407,17 +514,25 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108
github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
+github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b h1:aUNXCGgukb4gtY99imuIeoh8Vr0GSwAlYxPAhqZrpFc=
github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b/go.mod h1:wTPjTepVu7uJBYgZ0SdWHQlIas582j6cn2jgk4DDdlg=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
@@ -428,8 +543,11 @@ github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThC
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
@@ -444,7 +562,21 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykE
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
+github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=
+github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
+github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
+github.com/spf13/viper v1.9.0 h1:yR6EXjTp0y0cLN8OZg1CRZmOBdI88UcGkhgyJhu6nZk=
+github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
@@ -455,6 +587,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/superseriousbusiness/activity v1.0.1-0.20211113133524-56560b73ace8 h1:8Bwy6CSsT33/sF5FhjND4vr7jiJCaq4elNTAW4rUzVc=
github.com/superseriousbusiness/activity v1.0.1-0.20211113133524-56560b73ace8/go.mod h1:ZY9xwFDucvp6zTvM6FQZGl8PSOofPBFIAy6gSc85XkY=
github.com/superseriousbusiness/exifremove v0.0.0-20210330092427-6acd27eac203 h1:1SWXcTphBQjYGWRRxLFIAR1LVtQEj4eR7xPtyeOVM/c=
@@ -500,8 +634,6 @@ github.com/uptrace/bun/dialect/pgdialect v1.0.18 h1:PZDvpQSrc7onj1SsGNKFHy4LDfob
github.com/uptrace/bun/dialect/pgdialect v1.0.18/go.mod h1:Zw3h+kaJKexgfsHi+0tAxZXx0iHh16lyvwXnnMHL7xc=
github.com/uptrace/bun/dialect/sqlitedialect v1.0.18 h1:Xc4zoBtS2lK47lDjA5J3K1p/JwGGKk50Yxhzzj2kwPY=
github.com/uptrace/bun/dialect/sqlitedialect v1.0.18/go.mod h1:A9R2zIMUL1MkIl5xYLzq/NHQ8PC2Ob3kRgegMs7obdA=
-github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M=
-github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.14.0 h1:67bfuW9azCMwW/Jlq/C+VeihNpAuJMWkYPBig1gdi3A=
@@ -530,13 +662,20 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
+go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
@@ -556,16 +695,19 @@ go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
golang.org/x/crypto v0.0.0-20180527072434-ab813273cd59/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871 h1:/pEO3GD/ABYAjuakUS6xSEmmlyVS4kxBNkeA9tLJiTI=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -590,6 +732,8 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
@@ -598,12 +742,16 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -615,6 +763,7 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -633,7 +782,14 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@@ -644,6 +800,16 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
@@ -656,11 +822,14 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180525142821-c11f84a56e43/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -674,8 +843,11 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -683,6 +855,7 @@ golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -695,14 +868,29 @@ golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210902050250-f475640dd07b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211123173158-ef496fb156ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -717,6 +905,7 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
@@ -739,10 +928,12 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -766,10 +957,23 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ=
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -795,6 +999,18 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
+google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
+google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
+google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -826,12 +1042,35 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
+google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
+google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -844,6 +1083,20 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -868,6 +1121,9 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
+gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.63.2 h1:tGK/CyBg7SMzb60vP1M03vNZ3VDu3wGQJwn7Sxi9r3c=
+gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
diff --git a/internal/api/client/account/account.go b/internal/api/client/account/account.go
index 6e8b1e242..d75fb8e9c 100644
--- a/internal/api/client/account/account.go
+++ b/internal/api/client/account/account.go
@@ -24,7 +24,6 @@ import (
"github.com/gin-gonic/gin"
"github.com/superseriousbusiness/gotosocial/internal/api"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/router"
@@ -76,14 +75,12 @@ const (
// Module implements the ClientAPIModule interface for account-related actions
type Module struct {
- config *config.Config
processor processing.Processor
}
// New returns a new account module
-func New(config *config.Config, processor processing.Processor) api.ClientModule {
+func New(processor processing.Processor) api.ClientModule {
return &Module{
- config: config,
processor: processor,
}
}
diff --git a/internal/api/client/account/account_test.go b/internal/api/client/account/account_test.go
index 315ddfce3..f7dfa4520 100644
--- a/internal/api/client/account/account_test.go
+++ b/internal/api/client/account/account_test.go
@@ -8,6 +8,7 @@ import (
"codeberg.org/gruf/go-store/kv"
"github.com/gin-gonic/gin"
+ "github.com/spf13/viper"
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/api/client/account"
"github.com/superseriousbusiness/gotosocial/internal/config"
@@ -24,7 +25,6 @@ import (
type AccountStandardTestSuite struct {
// standard suite interfaces
suite.Suite
- config *config.Config
db db.DB
tc typeutils.TypeConverter
storage *kv.KVStore
@@ -57,7 +57,7 @@ func (suite *AccountStandardTestSuite) SetupSuite() {
}
func (suite *AccountStandardTestSuite) SetupTest() {
- suite.config = testrig.NewTestConfig()
+ testrig.InitTestConfig()
suite.db = testrig.NewTestDB()
suite.storage = testrig.NewTestStorage()
testrig.InitTestLog()
@@ -65,7 +65,7 @@ func (suite *AccountStandardTestSuite) SetupTest() {
suite.sentEmails = make(map[string]string)
suite.emailSender = testrig.NewEmailSender("../../../../web/template/", suite.sentEmails)
suite.processor = testrig.NewTestProcessor(suite.db, suite.storage, suite.federator, suite.emailSender)
- suite.accountModule = account.New(suite.config, suite.processor).(*account.Module)
+ suite.accountModule = account.New(suite.processor).(*account.Module)
testrig.StandardDBSetup(suite.db, nil)
testrig.StandardStorageSetup(suite.storage, "../../../../testrig/media")
}
@@ -83,7 +83,10 @@ func (suite *AccountStandardTestSuite) newContext(recorder *httptest.ResponseRec
ctx.Set(oauth.SessionAuthorizedApplication, suite.testApplications["application_1"])
ctx.Set(oauth.SessionAuthorizedUser, suite.testUsers["local_account_1"])
- baseURI := fmt.Sprintf("%s://%s", suite.config.Protocol, suite.config.Host)
+ protocol := viper.GetString(config.Keys.Protocol)
+ host := viper.GetString(config.Keys.Host)
+
+ baseURI := fmt.Sprintf("%s://%s", protocol, host)
requestURI := fmt.Sprintf("%s/%s", baseURI, requestPath)
ctx.Request = httptest.NewRequest(http.MethodPatch, requestURI, bytes.NewReader(requestBody)) // the endpoint we're hitting
diff --git a/internal/api/client/account/accountcreate.go b/internal/api/client/account/accountcreate.go
index 6497d4d01..ae9d7b0d7 100644
--- a/internal/api/client/account/accountcreate.go
+++ b/internal/api/client/account/accountcreate.go
@@ -20,10 +20,12 @@ package account
import (
"errors"
- "github.com/sirupsen/logrus"
"net"
"net/http"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
+
"github.com/gin-gonic/gin"
"github.com/superseriousbusiness/gotosocial/internal/api/model"
"github.com/superseriousbusiness/gotosocial/internal/config"
@@ -85,7 +87,7 @@ func (m *Module) AccountCreatePOSTHandler(c *gin.Context) {
}
l.Tracef("validating form %+v", form)
- if err := validateCreateAccount(form, m.config.AccountsConfig); err != nil {
+ if err := validateCreateAccount(form); err != nil {
l.Debugf("error validating form: %s", err)
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
@@ -114,8 +116,10 @@ func (m *Module) AccountCreatePOSTHandler(c *gin.Context) {
// validateCreateAccount checks through all the necessary prerequisites for creating a new account,
// according to the provided account create request. If the account isn't eligible, an error will be returned.
-func validateCreateAccount(form *model.AccountCreateRequest, c *config.AccountsConfig) error {
- if !c.OpenRegistration {
+func validateCreateAccount(form *model.AccountCreateRequest) error {
+ keys := config.Keys
+
+ if !viper.GetBool(keys.AccountsRegistrationOpen) {
return errors.New("registration is not open for this server")
}
@@ -139,7 +143,7 @@ func validateCreateAccount(form *model.AccountCreateRequest, c *config.AccountsC
return err
}
- if err := validate.SignUpReason(form.Reason, c.ReasonRequired); err != nil {
+ if err := validate.SignUpReason(form.Reason, viper.GetBool(keys.AccountsReasonRequired)); err != nil {
return err
}
diff --git a/internal/api/client/admin/admin.go b/internal/api/client/admin/admin.go
index 35d162139..f611807a6 100644
--- a/internal/api/client/admin/admin.go
+++ b/internal/api/client/admin/admin.go
@@ -22,7 +22,6 @@ import (
"net/http"
"github.com/superseriousbusiness/gotosocial/internal/api"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/router"
)
@@ -47,14 +46,12 @@ const (
// Module implements the ClientAPIModule interface for admin-related actions (reports, emojis, etc)
type Module struct {
- config *config.Config
processor processing.Processor
}
// New returns a new admin module
-func New(config *config.Config, processor processing.Processor) api.ClientModule {
+func New(processor processing.Processor) api.ClientModule {
return &Module{
- config: config,
processor: processor,
}
}
diff --git a/internal/api/client/app/app.go b/internal/api/client/app/app.go
index b3df7c388..a5f4c9bcd 100644
--- a/internal/api/client/app/app.go
+++ b/internal/api/client/app/app.go
@@ -22,7 +22,6 @@ import (
"net/http"
"github.com/superseriousbusiness/gotosocial/internal/api"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/router"
)
@@ -32,14 +31,12 @@ const BasePath = "/api/v1/apps"
// Module implements the ClientAPIModule interface for requests relating to registering/removing applications
type Module struct {
- config *config.Config
processor processing.Processor
}
// New returns a new auth module
-func New(config *config.Config, processor processing.Processor) api.ClientModule {
+func New(processor processing.Processor) api.ClientModule {
return &Module{
- config: config,
processor: processor,
}
}
diff --git a/internal/api/client/auth/auth.go b/internal/api/client/auth/auth.go
index 7d9a0caf5..2552331a6 100644
--- a/internal/api/client/auth/auth.go
+++ b/internal/api/client/auth/auth.go
@@ -22,7 +22,6 @@ import (
"net/http"
"github.com/superseriousbusiness/gotosocial/internal/api"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
"github.com/superseriousbusiness/gotosocial/internal/oidc"
@@ -54,16 +53,14 @@ const (
// Module implements the ClientAPIModule interface for auth-related actions
type Module struct {
- config *config.Config
db db.DB
server oauth.Server
idp oidc.IDP
}
// New returns a new auth module
-func New(config *config.Config, db db.DB, server oauth.Server, idp oidc.IDP) api.ClientModule {
+func New(db db.DB, server oauth.Server, idp oidc.IDP) api.ClientModule {
return &Module{
- config: config,
db: db,
server: server,
idp: idp,
diff --git a/internal/api/client/auth/auth_test.go b/internal/api/client/auth/auth_test.go
index ae58ffbbb..7f2a78ada 100644
--- a/internal/api/client/auth/auth_test.go
+++ b/internal/api/client/auth/auth_test.go
@@ -18,134 +18,4 @@
package auth_test
-import (
- "context"
- "fmt"
- "testing"
-
- "github.com/google/uuid"
- "github.com/sirupsen/logrus"
- "github.com/stretchr/testify/suite"
- "github.com/superseriousbusiness/gotosocial/internal/config"
- "github.com/superseriousbusiness/gotosocial/internal/db"
- "github.com/superseriousbusiness/gotosocial/internal/db/bundb"
- "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
- "github.com/superseriousbusiness/gotosocial/internal/oauth"
- "golang.org/x/crypto/bcrypt"
-)
-
-type AuthTestSuite struct {
- suite.Suite
- oauthServer oauth.Server
- db db.DB
- testAccount *gtsmodel.Account
- testApplication *gtsmodel.Application
- testUser *gtsmodel.User
- testClient *gtsmodel.Client
- config *config.Config
-}
-
-// SetupSuite sets some variables on the suite that we can use as consts (more or less) throughout
-func (suite *AuthTestSuite) SetupSuite() {
- c := config.Default()
- // we're running on localhost without https so set the protocol to http
- c.Protocol = "http"
- // just for testing
- c.Host = "localhost:8080"
- // because go tests are run within the test package directory, we need to fiddle with the templateconfig
- // basedir in a way that we wouldn't normally have to do when running the binary, in order to make
- // the templates actually load
- c.TemplateConfig.BaseDir = "../../../web/template/"
- c.DBConfig = &config.DBConfig{
- Type: "postgres",
- Address: "localhost",
- Port: 5432,
- User: "postgres",
- Password: "postgres",
- Database: "postgres",
- ApplicationName: "gotosocial",
- }
- suite.config = c
-
- encryptedPassword, err := bcrypt.GenerateFromPassword([]byte("password"), bcrypt.DefaultCost)
- if err != nil {
- logrus.Panicf("error encrypting user pass: %s", err)
- }
-
- acctID := uuid.NewString()
-
- suite.testAccount = &gtsmodel.Account{
- ID: acctID,
- Username: "test_user",
- }
- suite.testUser = &gtsmodel.User{
- EncryptedPassword: string(encryptedPassword),
- Email: "user@example.org",
- AccountID: acctID,
- }
- suite.testClient = &gtsmodel.Client{
- ID: "a-known-client-id",
- Secret: "some-secret",
- Domain: fmt.Sprintf("%s://%s", c.Protocol, c.Host),
- }
- suite.testApplication = &gtsmodel.Application{
- Name: "a test application",
- Website: "https://some-application-website.com",
- RedirectURI: "http://localhost:8080",
- ClientID: "a-known-client-id",
- ClientSecret: "some-secret",
- Scopes: "read",
- }
-}
-
-// SetupTest creates a postgres connection and creates the oauth_clients table before each test
-func (suite *AuthTestSuite) SetupTest() {
-
- log := logrus.New()
- log.SetLevel(logrus.TraceLevel)
- db, err := bundb.NewBunDBService(context.Background(), suite.config)
- if err != nil {
- logrus.Panicf("error creating database connection: %s", err)
- }
-
- suite.db = db
- suite.oauthServer = oauth.New(context.Background(), suite.db)
-
- if err := suite.db.Put(context.Background(), suite.testAccount); err != nil {
- logrus.Panicf("could not insert test account into db: %s", err)
- }
- if err := suite.db.Put(context.Background(), suite.testUser); err != nil {
- logrus.Panicf("could not insert test user into db: %s", err)
- }
- if err := suite.db.Put(context.Background(), suite.testClient); err != nil {
- logrus.Panicf("could not insert test client into db: %s", err)
- }
- if err := suite.db.Put(context.Background(), suite.testApplication); err != nil {
- logrus.Panicf("could not insert test application into db: %s", err)
- }
-
-}
-
-// TearDownTest drops the oauth_clients table and closes the pg connection after each test
-func (suite *AuthTestSuite) TearDownTest() {
- models := []interface{}{
- &gtsmodel.Client{},
- &gtsmodel.Token{},
- &gtsmodel.User{},
- &gtsmodel.Account{},
- &gtsmodel.Application{},
- }
- for _, m := range models {
- if err := suite.db.DropTable(context.Background(), m); err != nil {
- logrus.Panicf("error dropping table: %s", err)
- }
- }
- if err := suite.db.Stop(context.Background()); err != nil {
- logrus.Panicf("error closing db connection: %s", err)
- }
- suite.db = nil
-}
-
-func TestAuthTestSuite(t *testing.T) {
- suite.Run(t, new(AuthTestSuite))
-}
+// TODO
diff --git a/internal/api/client/auth/callback.go b/internal/api/client/auth/callback.go
index 286bcd935..d50fea3db 100644
--- a/internal/api/client/auth/callback.go
+++ b/internal/api/client/auth/callback.go
@@ -30,6 +30,8 @@ import (
"github.com/gin-contrib/sessions"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
+ "github.com/spf13/viper"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/oidc"
@@ -211,7 +213,8 @@ func (m *Module) parseUserFromClaims(ctx context.Context, claims *oidc.Claims, i
password := uuid.NewString() + uuid.NewString()
// create the user! this will also create an account and store it in the database so we don't need to do that here
- user, err = m.db.NewSignup(ctx, username, "", m.config.AccountsConfig.RequireApproval, claims.Email, password, ip, "", appID, claims.EmailVerified, admin)
+ requireApproval := viper.GetBool(config.Keys.AccountsApprovalRequired)
+ user, err = m.db.NewSignup(ctx, username, "", requireApproval, claims.Email, password, ip, "", appID, claims.EmailVerified, admin)
if err != nil {
return nil, fmt.Errorf("error creating user: %s", err)
}
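
Note on the pattern above (it recurs in almost every hunk below): instead of threading a *config.Config through module constructors, handlers now look values up from viper at the point of use, via the flat key constants in the config package. A minimal, self-contained sketch of that lookup style, using an illustrative key name rather than the real config.Keys constant:

    package main

    import (
    	"fmt"

    	"github.com/spf13/viper"
    )

    func main() {
    	// Wherever the value came from (config file, env var, CLI flag),
    	// viper is the single source of truth once the command has started.
    	viper.Set("accounts-approval-required", true)

    	// Handlers read it on demand instead of holding a config struct.
    	requireApproval := viper.GetBool("accounts-approval-required")
    	fmt.Println("require approval:", requireApproval)
    }
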
diff --git a/internal/api/client/blocks/blocks.go b/internal/api/client/blocks/blocks.go
index 531dfa8d8..552a6c599 100644
--- a/internal/api/client/blocks/blocks.go
+++ b/internal/api/client/blocks/blocks.go
@@ -22,7 +22,6 @@ import (
"net/http"
"github.com/superseriousbusiness/gotosocial/internal/api"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/router"
)
@@ -41,14 +40,12 @@ const (
// Module implements the ClientAPIModule interface for everything relating to viewing blocks
type Module struct {
- config *config.Config
processor processing.Processor
}
// New returns a new blocks module
-func New(config *config.Config, processor processing.Processor) api.ClientModule {
+func New(processor processing.Processor) api.ClientModule {
return &Module{
- config: config,
processor: processor,
}
}
diff --git a/internal/api/client/emoji/emoji.go b/internal/api/client/emoji/emoji.go
index 4c69fbcc2..4ce021d8f 100644
--- a/internal/api/client/emoji/emoji.go
+++ b/internal/api/client/emoji/emoji.go
@@ -22,7 +22,6 @@ import (
"net/http"
"github.com/superseriousbusiness/gotosocial/internal/api"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/router"
)
@@ -34,14 +33,12 @@ const (
// Module implements the ClientAPIModule interface for everything related to emoji
type Module struct {
- config *config.Config
processor processing.Processor
}
// New returns a new emoji module
-func New(config *config.Config, processor processing.Processor) api.ClientModule {
+func New(processor processing.Processor) api.ClientModule {
return &Module{
- config: config,
processor: processor,
}
}
diff --git a/internal/api/client/favourites/favourites.go b/internal/api/client/favourites/favourites.go
index daaab82f8..f03372ccd 100644
--- a/internal/api/client/favourites/favourites.go
+++ b/internal/api/client/favourites/favourites.go
@@ -22,7 +22,6 @@ import (
"net/http"
"github.com/superseriousbusiness/gotosocial/internal/api"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/router"
)
@@ -45,14 +44,12 @@ const (
// Module implements the ClientAPIModule interface for everything relating to viewing favourites
type Module struct {
- config *config.Config
processor processing.Processor
}
// New returns a new favourites module
-func New(config *config.Config, processor processing.Processor) api.ClientModule {
+func New(processor processing.Processor) api.ClientModule {
return &Module{
- config: config,
processor: processor,
}
}
diff --git a/internal/api/client/fileserver/fileserver.go b/internal/api/client/fileserver/fileserver.go
index e60105686..092a15256 100644
--- a/internal/api/client/fileserver/fileserver.go
+++ b/internal/api/client/fileserver/fileserver.go
@@ -22,6 +22,7 @@ import (
"fmt"
"net/http"
+ "github.com/spf13/viper"
"github.com/superseriousbusiness/gotosocial/internal/api"
"github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/processing"
@@ -42,22 +43,20 @@ const (
// FileServer implements the RESTAPIModule interface.
// The goal here is to serve requested media files if the gotosocial server is configured to use local storage.
type FileServer struct {
- config *config.Config
- processor processing.Processor
- storageBase string
+ processor processing.Processor
+ storageServeBasePath string
}
// New returns a new fileServer module
-func New(config *config.Config, processor processing.Processor) api.ClientModule {
+func New(processor processing.Processor) api.ClientModule {
return &FileServer{
- config: config,
- processor: processor,
- storageBase: config.StorageConfig.ServeBasePath,
+ processor: processor,
+ storageServeBasePath: viper.GetString(config.Keys.StorageServeBasePath),
}
}
// Route satisfies the RESTAPIModule interface
func (m *FileServer) Route(s router.Router) error {
- s.AttachHandler(http.MethodGet, fmt.Sprintf("%s/:%s/:%s/:%s/:%s", m.storageBase, AccountIDKey, MediaTypeKey, MediaSizeKey, FileNameKey), m.ServeFile)
+ s.AttachHandler(http.MethodGet, fmt.Sprintf("%s/:%s/:%s/:%s/:%s", m.storageServeBasePath, AccountIDKey, MediaTypeKey, MediaSizeKey, FileNameKey), m.ServeFile)
return nil
}
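
The fileserver takes a slightly different route: the serve base path is read from viper once, when the module is constructed, and cached on the struct so routing doesn't re-query viper on every request. A rough sketch of that construction-time caching (the key string here is illustrative, not necessarily the exact config.Keys constant):

    package main

    import (
    	"fmt"

    	"github.com/spf13/viper"
    )

    // fileServer caches the configured base path at construction time.
    type fileServer struct {
    	storageServeBasePath string
    }

    func newFileServer() *fileServer {
    	return &fileServer{
    		storageServeBasePath: viper.GetString("storage-serve-base-path"),
    	}
    }

    func main() {
    	viper.Set("storage-serve-base-path", "/fileserver")
    	fs := newFileServer()
    	// The route template is then built from the cached value.
    	fmt.Printf("%s/:account_id/:media_type/:media_size/:file_name\n", fs.storageServeBasePath)
    }
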
diff --git a/internal/api/client/fileserver/servefile_test.go b/internal/api/client/fileserver/servefile_test.go
index 692122a7b..b93f97606 100644
--- a/internal/api/client/fileserver/servefile_test.go
+++ b/internal/api/client/fileserver/servefile_test.go
@@ -32,7 +32,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/api/client/fileserver"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
@@ -47,7 +46,6 @@ import (
type ServeFileTestSuite struct {
// standard suite interfaces
suite.Suite
- config *config.Config
db db.DB
storage *kv.KVStore
federator federation.Federator
@@ -75,9 +73,9 @@ type ServeFileTestSuite struct {
func (suite *ServeFileTestSuite) SetupSuite() {
// setup standard items
- suite.config = testrig.NewTestConfig()
- suite.db = testrig.NewTestDB()
+ testrig.InitTestConfig()
testrig.InitTestLog()
+ suite.db = testrig.NewTestDB()
suite.storage = testrig.NewTestStorage()
suite.federator = testrig.NewTestFederator(suite.db, testrig.NewTestTransportController(testrig.NewMockHTTPClient(nil), suite.db), suite.storage)
suite.emailSender = testrig.NewEmailSender("../../../../web/template/", nil)
@@ -88,7 +86,7 @@ func (suite *ServeFileTestSuite) SetupSuite() {
suite.oauthServer = testrig.NewTestOauthServer(suite.db)
// setup module being tested
- suite.fileServer = fileserver.New(suite.config, suite.processor).(*fileserver.FileServer)
+ suite.fileServer = fileserver.New(suite.processor).(*fileserver.FileServer)
}
func (suite *ServeFileTestSuite) TearDownSuite() {
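
The test suites also switch from suite.config = testrig.NewTestConfig() to testrig.InitTestConfig(), which has to run before anything that reads configuration (hence it moving above NewTestDB). The sketch below is only a guess at what such an initializer does, seeding viper with test values; the helper name and keys are assumptions, not the actual testrig code:

    package main

    import (
    	"fmt"

    	"github.com/spf13/viper"
    )

    // initTestConfig is a hypothetical stand-in for testrig.InitTestConfig:
    // it seeds viper so later calls like NewTestDB find the values they need.
    func initTestConfig() {
    	viper.Set("protocol", "http")
    	viper.Set("host", "localhost:8080")
    	viper.Set("db-type", "sqlite")
    	viper.Set("db-address", ":memory:")
    }

    func main() {
    	initTestConfig()
    	fmt.Println(viper.GetString("protocol"), viper.GetString("host"))
    }
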
diff --git a/internal/api/client/filter/filter.go b/internal/api/client/filter/filter.go
index 2ec53bd0a..e8dea8170 100644
--- a/internal/api/client/filter/filter.go
+++ b/internal/api/client/filter/filter.go
@@ -22,7 +22,6 @@ import (
"net/http"
"github.com/superseriousbusiness/gotosocial/internal/api"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/router"
)
@@ -34,14 +33,12 @@ const (
// Module implements the ClientAPIModule interface for everything related to filters
type Module struct {
- config *config.Config
processor processing.Processor
}
// New returns a new filter module
-func New(config *config.Config, processor processing.Processor) api.ClientModule {
+func New(processor processing.Processor) api.ClientModule {
return &Module{
- config: config,
processor: processor,
}
}
diff --git a/internal/api/client/followrequest/followrequest.go b/internal/api/client/followrequest/followrequest.go
index 4f46e1654..75fbbea10 100644
--- a/internal/api/client/followrequest/followrequest.go
+++ b/internal/api/client/followrequest/followrequest.go
@@ -22,7 +22,6 @@ import (
"net/http"
"github.com/superseriousbusiness/gotosocial/internal/api"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/router"
)
@@ -43,14 +42,12 @@ const (
// Module implements the ClientAPIModule interface
type Module struct {
- config *config.Config
processor processing.Processor
}
// New returns a new follow request module
-func New(config *config.Config, processor processing.Processor) api.ClientModule {
+func New(processor processing.Processor) api.ClientModule {
return &Module{
- config: config,
processor: processor,
}
}
diff --git a/internal/api/client/followrequest/followrequest_test.go b/internal/api/client/followrequest/followrequest_test.go
index 3c2cc32d0..e7dccc210 100644
--- a/internal/api/client/followrequest/followrequest_test.go
+++ b/internal/api/client/followrequest/followrequest_test.go
@@ -25,6 +25,7 @@ import (
"codeberg.org/gruf/go-store/kv"
"github.com/gin-gonic/gin"
+ "github.com/spf13/viper"
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/api/client/followrequest"
"github.com/superseriousbusiness/gotosocial/internal/config"
@@ -39,7 +40,6 @@ import (
type FollowRequestStandardTestSuite struct {
suite.Suite
- config *config.Config
db db.DB
storage *kv.KVStore
federator federation.Federator
@@ -70,14 +70,14 @@ func (suite *FollowRequestStandardTestSuite) SetupSuite() {
}
func (suite *FollowRequestStandardTestSuite) SetupTest() {
+ testrig.InitTestConfig()
testrig.InitTestLog()
- suite.config = testrig.NewTestConfig()
suite.db = testrig.NewTestDB()
suite.storage = testrig.NewTestStorage()
suite.federator = testrig.NewTestFederator(suite.db, testrig.NewTestTransportController(testrig.NewMockHTTPClient(nil), suite.db), suite.storage)
suite.emailSender = testrig.NewEmailSender("../../../../web/template/", nil)
suite.processor = testrig.NewTestProcessor(suite.db, suite.storage, suite.federator, suite.emailSender)
- suite.followRequestModule = followrequest.New(suite.config, suite.processor).(*followrequest.Module)
+ suite.followRequestModule = followrequest.New(suite.processor).(*followrequest.Module)
testrig.StandardDBSetup(suite.db, nil)
testrig.StandardStorageSetup(suite.storage, "../../../../testrig/media")
}
@@ -95,7 +95,10 @@ func (suite *FollowRequestStandardTestSuite) newContext(recorder *httptest.Respo
ctx.Set(oauth.SessionAuthorizedApplication, suite.testApplications["application_1"])
ctx.Set(oauth.SessionAuthorizedUser, suite.testUsers["local_account_1"])
- baseURI := fmt.Sprintf("%s://%s", suite.config.Protocol, suite.config.Host)
+ protocol := viper.GetString(config.Keys.Protocol)
+ host := viper.GetString(config.Keys.Host)
+
+ baseURI := fmt.Sprintf("%s://%s", protocol, host)
requestURI := fmt.Sprintf("%s/%s", baseURI, requestPath)
ctx.Request = httptest.NewRequest(requestMethod, requestURI, bytes.NewReader(requestBody)) // the endpoint we're hitting
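
Tests that previously built URLs from suite.config.Protocol and suite.config.Host now read both values back out of viper. A small sketch of the same construction, with illustrative key names standing in for config.Keys.Protocol and config.Keys.Host:

    package main

    import (
    	"fmt"
    	"net/http/httptest"

    	"github.com/spf13/viper"
    )

    func main() {
    	// Normally seeded by the testrig config init; set inline here.
    	viper.Set("protocol", "http")
    	viper.Set("host", "localhost:8080")

    	baseURI := fmt.Sprintf("%s://%s", viper.GetString("protocol"), viper.GetString("host"))
    	req := httptest.NewRequest("GET", baseURI+"/api/v1/follow_requests", nil)
    	fmt.Println(req.URL.String())
    }
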
diff --git a/internal/api/client/instance/instance.go b/internal/api/client/instance/instance.go
index 33e0054c3..758cce376 100644
--- a/internal/api/client/instance/instance.go
+++ b/internal/api/client/instance/instance.go
@@ -4,7 +4,6 @@ import (
"net/http"
"github.com/superseriousbusiness/gotosocial/internal/api"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/router"
)
@@ -16,14 +15,12 @@ const (
// Module implements the ClientModule interface
type Module struct {
- config *config.Config
processor processing.Processor
}
// New returns a new instance information module
-func New(config *config.Config, processor processing.Processor) api.ClientModule {
+func New(processor processing.Processor) api.ClientModule {
return &Module{
- config: config,
processor: processor,
}
}
diff --git a/internal/api/client/instance/instanceget.go b/internal/api/client/instance/instanceget.go
index fac917b25..f2f496cb5 100644
--- a/internal/api/client/instance/instanceget.go
+++ b/internal/api/client/instance/instanceget.go
@@ -1,9 +1,12 @@
package instance
import (
- "github.com/sirupsen/logrus"
"net/http"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
+
"github.com/gin-gonic/gin"
)
@@ -32,7 +35,9 @@ import (
func (m *Module) InstanceInformationGETHandler(c *gin.Context) {
l := logrus.WithField("func", "InstanceInformationGETHandler")
- instance, err := m.processor.InstanceGet(c.Request.Context(), m.config.Host)
+ host := viper.GetString(config.Keys.Host)
+
+ instance, err := m.processor.InstanceGet(c.Request.Context(), host)
if err != nil {
l.Debugf("error getting instance from processor: %s", err)
c.JSON(http.StatusInternalServerError, gin.H{"error": "internal server error"})
diff --git a/internal/api/client/list/list.go b/internal/api/client/list/list.go
index 106682ca9..347525d54 100644
--- a/internal/api/client/list/list.go
+++ b/internal/api/client/list/list.go
@@ -22,7 +22,6 @@ import (
"net/http"
"github.com/superseriousbusiness/gotosocial/internal/api"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/router"
)
@@ -34,14 +33,12 @@ const (
// Module implements the ClientAPIModule interface for everything related to lists
type Module struct {
- config *config.Config
processor processing.Processor
}
// New returns a new list module
-func New(config *config.Config, processor processing.Processor) api.ClientModule {
+func New(processor processing.Processor) api.ClientModule {
return &Module{
- config: config,
processor: processor,
}
}
diff --git a/internal/api/client/media/media.go b/internal/api/client/media/media.go
index a051dea48..669ce9ce9 100644
--- a/internal/api/client/media/media.go
+++ b/internal/api/client/media/media.go
@@ -22,7 +22,6 @@ import (
"net/http"
"github.com/superseriousbusiness/gotosocial/internal/api"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/router"
)
@@ -38,14 +37,12 @@ const BasePathWithID = BasePath + "/:" + IDKey
// Module implements the ClientAPIModule interface for media
type Module struct {
- config *config.Config
processor processing.Processor
}
// New returns a new media module
-func New(config *config.Config, processor processing.Processor) api.ClientModule {
+func New(processor processing.Processor) api.ClientModule {
return &Module{
- config: config,
processor: processor,
}
}
diff --git a/internal/api/client/media/mediacreate.go b/internal/api/client/media/mediacreate.go
index 5b7a2dea7..f808b554c 100644
--- a/internal/api/client/media/mediacreate.go
+++ b/internal/api/client/media/mediacreate.go
@@ -21,9 +21,11 @@ package media
import (
"errors"
"fmt"
- "github.com/sirupsen/logrus"
"net/http"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
+
"github.com/gin-gonic/gin"
"github.com/superseriousbusiness/gotosocial/internal/api/model"
"github.com/superseriousbusiness/gotosocial/internal/config"
@@ -102,7 +104,7 @@ func (m *Module) MediaCreatePOSTHandler(c *gin.Context) {
// Give the fields on the request form a first pass to make sure the request is superficially valid.
l.Tracef("validating form %+v", form)
- if err := validateCreateMedia(form, m.config.MediaConfig); err != nil {
+ if err := validateCreateMedia(form); err != nil {
l.Debugf("error validating form: %s", err)
c.JSON(http.StatusUnprocessableEntity, gin.H{"error": err.Error()})
return
@@ -119,24 +121,30 @@ func (m *Module) MediaCreatePOSTHandler(c *gin.Context) {
c.JSON(http.StatusOK, apiAttachment)
}
-func validateCreateMedia(form *model.AttachmentRequest, config *config.MediaConfig) error {
+func validateCreateMedia(form *model.AttachmentRequest) error {
// check there actually is a file attached and it's not size 0
if form.File == nil {
return errors.New("no attachment given")
}
+ keys := config.Keys
+ maxVideoSize := viper.GetInt(keys.MediaVideoMaxSize)
+ maxImageSize := viper.GetInt(keys.MediaImageMaxSize)
+ minDescriptionChars := viper.GetInt(keys.MediaDescriptionMinChars)
+ maxDescriptionChars := viper.GetInt(keys.MediaDescriptionMaxChars)
+
// a very superficial check to see if no size limits are exceeded
// we still don't actually know which media types we're dealing with but the other handlers will go into more detail there
- maxSize := config.MaxVideoSize
- if config.MaxImageSize > maxSize {
- maxSize = config.MaxImageSize
+ maxSize := maxVideoSize
+ if maxImageSize > maxSize {
+ maxSize = maxImageSize
}
if form.File.Size > int64(maxSize) {
return fmt.Errorf("file size limit exceeded: limit is %d bytes but attachment was %d bytes", maxSize, form.File.Size)
}
- if len(form.Description) < config.MinDescriptionChars || len(form.Description) > config.MaxDescriptionChars {
- return fmt.Errorf("image description length must be between %d and %d characters (inclusive), but provided image description was %d chars", config.MinDescriptionChars, config.MaxDescriptionChars, len(form.Description))
+ if len(form.Description) < minDescriptionChars || len(form.Description) > maxDescriptionChars {
+ return fmt.Errorf("image description length must be between %d and %d characters (inclusive), but provided image description was %d chars", minDescriptionChars, maxDescriptionChars, len(form.Description))
}
// TODO: validate focus here
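
Validation helpers lose their *config.MediaConfig / *config.StatusesConfig parameters and instead pull each limit from viper when they run; the same shape appears again in mediaupdate.go and statuscreate.go below. A condensed sketch of one such check, with illustrative key names:

    package main

    import (
    	"fmt"

    	"github.com/spf13/viper"
    )

    // validateDescription mirrors the new validator shape: read the limits
    // from viper at call time, then check the form field against them.
    func validateDescription(description string) error {
    	minChars := viper.GetInt("media-description-min-chars")
    	maxChars := viper.GetInt("media-description-max-chars")
    	if len(description) < minChars || len(description) > maxChars {
    		return fmt.Errorf("description must be between %d and %d characters (inclusive), but was %d", minChars, maxChars, len(description))
    	}
    	return nil
    }

    func main() {
    	viper.Set("media-description-min-chars", 0)
    	viper.Set("media-description-max-chars", 500)
    	fmt.Println(validateDescription("a reasonable alt text")) // <nil>
    }
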
diff --git a/internal/api/client/media/mediacreate_test.go b/internal/api/client/media/mediacreate_test.go
index 9ad1b9001..f8a17c9de 100644
--- a/internal/api/client/media/mediacreate_test.go
+++ b/internal/api/client/media/mediacreate_test.go
@@ -35,7 +35,6 @@ import (
"github.com/stretchr/testify/suite"
mediamodule "github.com/superseriousbusiness/gotosocial/internal/api/client/media"
"github.com/superseriousbusiness/gotosocial/internal/api/model"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
@@ -50,7 +49,6 @@ import (
type MediaCreateTestSuite struct {
// standard suite interfaces
suite.Suite
- config *config.Config
db db.DB
storage *kv.KVStore
federator federation.Federator
@@ -78,9 +76,9 @@ type MediaCreateTestSuite struct {
func (suite *MediaCreateTestSuite) SetupSuite() {
// setup standard items
- suite.config = testrig.NewTestConfig()
- suite.db = testrig.NewTestDB()
+ testrig.InitTestConfig()
testrig.InitTestLog()
+ suite.db = testrig.NewTestDB()
suite.storage = testrig.NewTestStorage()
suite.tc = testrig.NewTestTypeConverter(suite.db)
suite.mediaHandler = testrig.NewTestMediaHandler(suite.db, suite.storage)
@@ -90,7 +88,7 @@ func (suite *MediaCreateTestSuite) SetupSuite() {
suite.processor = testrig.NewTestProcessor(suite.db, suite.storage, suite.federator, suite.emailSender)
// setup module being tested
- suite.mediaModule = mediamodule.New(suite.config, suite.processor).(*mediamodule.Module)
+ suite.mediaModule = mediamodule.New(suite.processor).(*mediamodule.Module)
}
func (suite *MediaCreateTestSuite) TearDownSuite() {
diff --git a/internal/api/client/media/mediaupdate.go b/internal/api/client/media/mediaupdate.go
index 2a6d056bb..c22e4e919 100644
--- a/internal/api/client/media/mediaupdate.go
+++ b/internal/api/client/media/mediaupdate.go
@@ -21,9 +21,11 @@ package media
import (
"errors"
"fmt"
- "github.com/sirupsen/logrus"
"net/http"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
+
"github.com/gin-gonic/gin"
"github.com/superseriousbusiness/gotosocial/internal/api/model"
"github.com/superseriousbusiness/gotosocial/internal/config"
@@ -117,7 +119,7 @@ func (m *Module) MediaPUTHandler(c *gin.Context) {
// Give the fields on the request form a first pass to make sure the request is superficially valid.
l.Tracef("validating form %+v", form)
- if err := validateUpdateMedia(&form, m.config.MediaConfig); err != nil {
+ if err := validateUpdateMedia(&form); err != nil {
l.Debugf("error validating form: %s", err)
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
@@ -132,11 +134,14 @@ func (m *Module) MediaPUTHandler(c *gin.Context) {
c.JSON(http.StatusOK, attachment)
}
-func validateUpdateMedia(form *model.AttachmentUpdateRequest, config *config.MediaConfig) error {
+func validateUpdateMedia(form *model.AttachmentUpdateRequest) error {
+ keys := config.Keys
+ minDescriptionChars := viper.GetInt(keys.MediaDescriptionMinChars)
+ maxDescriptionChars := viper.GetInt(keys.MediaDescriptionMaxChars)
if form.Description != nil {
- if len(*form.Description) < config.MinDescriptionChars || len(*form.Description) > config.MaxDescriptionChars {
- return fmt.Errorf("image description length must be between %d and %d characters (inclusive), but provided image description was %d chars", config.MinDescriptionChars, config.MaxDescriptionChars, len(*form.Description))
+ if len(*form.Description) < minDescriptionChars || len(*form.Description) > maxDescriptionChars {
+ return fmt.Errorf("image description length must be between %d and %d characters (inclusive), but provided image description was %d chars", minDescriptionChars, maxDescriptionChars, len(*form.Description))
}
}
diff --git a/internal/api/client/notification/notification.go b/internal/api/client/notification/notification.go
index 04a1c2909..bc2d1e4bb 100644
--- a/internal/api/client/notification/notification.go
+++ b/internal/api/client/notification/notification.go
@@ -22,7 +22,6 @@ import (
"net/http"
"github.com/superseriousbusiness/gotosocial/internal/api"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/router"
)
@@ -46,14 +45,12 @@ const (
// Module implements the ClientAPIModule interface for everything related to posting/deleting/interacting with notifications
type Module struct {
- config *config.Config
processor processing.Processor
}
// New returns a new notification module
-func New(config *config.Config, processor processing.Processor) api.ClientModule {
+func New(processor processing.Processor) api.ClientModule {
return &Module{
- config: config,
processor: processor,
}
}
diff --git a/internal/api/client/search/search.go b/internal/api/client/search/search.go
index 474cccb27..b0b9c8613 100644
--- a/internal/api/client/search/search.go
+++ b/internal/api/client/search/search.go
@@ -22,7 +22,6 @@ import (
"net/http"
"github.com/superseriousbusiness/gotosocial/internal/api"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/router"
)
@@ -65,14 +64,12 @@ const (
// Module implements the ClientAPIModule interface for everything related to searching
type Module struct {
- config *config.Config
processor processing.Processor
}
// New returns a new search module
-func New(config *config.Config, processor processing.Processor) api.ClientModule {
+func New(processor processing.Processor) api.ClientModule {
return &Module{
- config: config,
processor: processor,
}
}
diff --git a/internal/api/client/status/status.go b/internal/api/client/status/status.go
index 6c7f077e1..2c6fc614a 100644
--- a/internal/api/client/status/status.go
+++ b/internal/api/client/status/status.go
@@ -26,7 +26,6 @@ import (
"github.com/gin-gonic/gin"
"github.com/superseriousbusiness/gotosocial/internal/api"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/router"
)
@@ -75,14 +74,12 @@ const (
// Module implements the ClientAPIModule interface for everything related to posting/deleting/interacting with statuses
type Module struct {
- config *config.Config
processor processing.Processor
}
// New returns a new status module
-func New(config *config.Config, processor processing.Processor) api.ClientModule {
+func New(processor processing.Processor) api.ClientModule {
return &Module{
- config: config,
processor: processor,
}
}
diff --git a/internal/api/client/status/status_test.go b/internal/api/client/status/status_test.go
index 399873fcc..0af5b78c2 100644
--- a/internal/api/client/status/status_test.go
+++ b/internal/api/client/status/status_test.go
@@ -22,7 +22,6 @@ import (
"codeberg.org/gruf/go-store/kv"
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/api/client/status"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
@@ -35,7 +34,6 @@ import (
type StatusStandardTestSuite struct {
// standard suite interfaces
suite.Suite
- config *config.Config
db db.DB
tc typeutils.TypeConverter
federator federation.Federator
@@ -67,15 +65,15 @@ func (suite *StatusStandardTestSuite) SetupSuite() {
}
func (suite *StatusStandardTestSuite) SetupTest() {
- suite.config = testrig.NewTestConfig()
+ testrig.InitTestConfig()
+ testrig.InitTestLog()
suite.db = testrig.NewTestDB()
suite.tc = testrig.NewTestTypeConverter(suite.db)
suite.storage = testrig.NewTestStorage()
- testrig.InitTestLog()
suite.federator = testrig.NewTestFederator(suite.db, testrig.NewTestTransportController(testrig.NewMockHTTPClient(nil), suite.db), suite.storage)
suite.emailSender = testrig.NewEmailSender("../../../../web/template/", nil)
suite.processor = testrig.NewTestProcessor(suite.db, suite.storage, suite.federator, suite.emailSender)
- suite.statusModule = status.New(suite.config, suite.processor).(*status.Module)
+ suite.statusModule = status.New(suite.processor).(*status.Module)
testrig.StandardDBSetup(suite.db, nil)
testrig.StandardStorageSetup(suite.storage, "../../../../testrig/media")
}
diff --git a/internal/api/client/status/statuscreate.go b/internal/api/client/status/statuscreate.go
index e9d92890e..629a325c5 100644
--- a/internal/api/client/status/statuscreate.go
+++ b/internal/api/client/status/statuscreate.go
@@ -21,9 +21,11 @@ package status
import (
"errors"
"fmt"
- "github.com/sirupsen/logrus"
"net/http"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
+
"github.com/gin-gonic/gin"
"github.com/superseriousbusiness/gotosocial/internal/api/model"
"github.com/superseriousbusiness/gotosocial/internal/config"
@@ -96,7 +98,7 @@ func (m *Module) StatusCreatePOSTHandler(c *gin.Context) {
// Give the fields on the request form a first pass to make sure the request is superficially valid.
l.Tracef("validating form %+v", form)
- if err := validateCreateStatus(form, m.config.StatusesConfig); err != nil {
+ if err := validateCreateStatus(form); err != nil {
l.Debugf("error validating form: %s", err)
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
@@ -112,7 +114,7 @@ func (m *Module) StatusCreatePOSTHandler(c *gin.Context) {
c.JSON(http.StatusOK, apiStatus)
}
-func validateCreateStatus(form *model.AdvancedStatusCreateForm, config *config.StatusesConfig) error {
+func validateCreateStatus(form *model.AdvancedStatusCreateForm) error {
// validate that, structurally, we have a valid status/post
if form.Status == "" && form.MediaIDs == nil && form.Poll == nil {
return errors.New("no status, media, or poll provided")
@@ -122,16 +124,23 @@ func validateCreateStatus(form *model.AdvancedStatusCreateForm, config *config.S
return errors.New("can't post media + poll in same status")
}
+ keys := config.Keys
+ maxChars := viper.GetInt(keys.StatusesMaxChars)
+ maxMediaFiles := viper.GetInt(keys.StatusesMediaMaxFiles)
+ maxPollOptions := viper.GetInt(keys.StatusesPollMaxOptions)
+ maxPollChars := viper.GetInt(keys.StatusesPollOptionMaxChars)
+ maxCwChars := viper.GetInt(keys.StatusesCWMaxChars)
+
// validate status
if form.Status != "" {
- if len(form.Status) > config.MaxChars {
- return fmt.Errorf("status too long, %d characters provided but limit is %d", len(form.Status), config.MaxChars)
+ if len(form.Status) > maxChars {
+ return fmt.Errorf("status too long, %d characters provided but limit is %d", len(form.Status), maxChars)
}
}
// validate media attachments
- if len(form.MediaIDs) > config.MaxMediaFiles {
- return fmt.Errorf("too many media files attached to status, %d attached but limit is %d", len(form.MediaIDs), config.MaxMediaFiles)
+ if len(form.MediaIDs) > maxMediaFiles {
+ return fmt.Errorf("too many media files attached to status, %d attached but limit is %d", len(form.MediaIDs), maxMediaFiles)
}
// validate poll
@@ -139,20 +148,20 @@ func validateCreateStatus(form *model.AdvancedStatusCreateForm, config *config.S
if form.Poll.Options == nil {
return errors.New("poll with no options")
}
- if len(form.Poll.Options) > config.PollMaxOptions {
- return fmt.Errorf("too many poll options provided, %d provided but limit is %d", len(form.Poll.Options), config.PollMaxOptions)
+ if len(form.Poll.Options) > maxPollOptions {
+ return fmt.Errorf("too many poll options provided, %d provided but limit is %d", len(form.Poll.Options), maxPollOptions)
}
for _, p := range form.Poll.Options {
- if len(p) > config.PollOptionMaxChars {
- return fmt.Errorf("poll option too long, %d characters provided but limit is %d", len(p), config.PollOptionMaxChars)
+ if len(p) > maxPollChars {
+ return fmt.Errorf("poll option too long, %d characters provided but limit is %d", len(p), maxPollChars)
}
}
}
// validate spoiler text/cw
if form.SpoilerText != "" {
- if len(form.SpoilerText) > config.CWMaxChars {
- return fmt.Errorf("content-warning/spoilertext too long, %d characters provided but limit is %d", len(form.SpoilerText), config.CWMaxChars)
+ if len(form.SpoilerText) > maxCwChars {
+ return fmt.Errorf("content-warning/spoilertext too long, %d characters provided but limit is %d", len(form.SpoilerText), maxCwChars)
}
}
diff --git a/internal/api/client/streaming/streaming.go b/internal/api/client/streaming/streaming.go
index fd52419e0..cd499455f 100644
--- a/internal/api/client/streaming/streaming.go
+++ b/internal/api/client/streaming/streaming.go
@@ -22,7 +22,6 @@ import (
"net/http"
"github.com/superseriousbusiness/gotosocial/internal/api"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/router"
)
@@ -40,14 +39,12 @@ const (
// Module implements the api.ClientModule interface for everything related to streaming
type Module struct {
- config *config.Config
processor processing.Processor
}
// New returns a new streaming module
-func New(config *config.Config, processor processing.Processor) api.ClientModule {
+func New(processor processing.Processor) api.ClientModule {
return &Module{
- config: config,
processor: processor,
}
}
diff --git a/internal/api/client/timeline/timeline.go b/internal/api/client/timeline/timeline.go
index 2aa939f61..3e9b37e07 100644
--- a/internal/api/client/timeline/timeline.go
+++ b/internal/api/client/timeline/timeline.go
@@ -22,7 +22,6 @@ import (
"net/http"
"github.com/superseriousbusiness/gotosocial/internal/api"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/router"
)
@@ -48,14 +47,12 @@ const (
// Module implements the ClientAPIModule interface for everything relating to viewing timelines
type Module struct {
- config *config.Config
processor processing.Processor
}
// New returns a new timeline module
-func New(config *config.Config, processor processing.Processor) api.ClientModule {
+func New(processor processing.Processor) api.ClientModule {
return &Module{
- config: config,
processor: processor,
}
}
diff --git a/internal/api/client/user/user.go b/internal/api/client/user/user.go
index ff28a197f..89f71344b 100644
--- a/internal/api/client/user/user.go
+++ b/internal/api/client/user/user.go
@@ -22,7 +22,6 @@ import (
"net/http"
"github.com/superseriousbusiness/gotosocial/internal/api"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/router"
)
@@ -36,14 +35,12 @@ const (
// Module implements the ClientAPIModule interface
type Module struct {
- config *config.Config
processor processing.Processor
}
// New returns a new user module
-func New(config *config.Config, processor processing.Processor) api.ClientModule {
+func New(processor processing.Processor) api.ClientModule {
return &Module{
- config: config,
processor: processor,
}
}
diff --git a/internal/api/client/user/user_test.go b/internal/api/client/user/user_test.go
index 76d95b2d9..4a9a1a9d7 100644
--- a/internal/api/client/user/user_test.go
+++ b/internal/api/client/user/user_test.go
@@ -22,7 +22,6 @@ import (
"codeberg.org/gruf/go-store/kv"
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/api/client/user"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
@@ -34,7 +33,6 @@ import (
type UserStandardTestSuite struct {
suite.Suite
- config *config.Config
db db.DB
tc typeutils.TypeConverter
federator federation.Federator
@@ -54,21 +52,21 @@ type UserStandardTestSuite struct {
}
func (suite *UserStandardTestSuite) SetupTest() {
+ testrig.InitTestLog()
+ testrig.InitTestConfig()
suite.testTokens = testrig.NewTestTokens()
suite.testClients = testrig.NewTestClients()
suite.testApplications = testrig.NewTestApplications()
suite.testUsers = testrig.NewTestUsers()
suite.testAccounts = testrig.NewTestAccounts()
- suite.config = testrig.NewTestConfig()
suite.db = testrig.NewTestDB()
suite.storage = testrig.NewTestStorage()
- testrig.InitTestLog()
suite.tc = testrig.NewTestTypeConverter(suite.db)
suite.federator = testrig.NewTestFederator(suite.db, testrig.NewTestTransportController(testrig.NewMockHTTPClient(nil), suite.db), suite.storage)
suite.sentEmails = make(map[string]string)
suite.emailSender = testrig.NewEmailSender("../../../../web/template/", suite.sentEmails)
suite.processor = testrig.NewTestProcessor(suite.db, suite.storage, suite.federator, suite.emailSender)
- suite.userModule = user.New(suite.config, suite.processor).(*user.Module)
+ suite.userModule = user.New(suite.processor).(*user.Module)
testrig.StandardDBSetup(suite.db, suite.testAccounts)
testrig.StandardStorageSetup(suite.storage, "../../../../testrig/media")
}
diff --git a/internal/api/s2s/nodeinfo/nodeinfo.go b/internal/api/s2s/nodeinfo/nodeinfo.go
index 5d1d03b24..1cbf32760 100644
--- a/internal/api/s2s/nodeinfo/nodeinfo.go
+++ b/internal/api/s2s/nodeinfo/nodeinfo.go
@@ -22,7 +22,6 @@ import (
"net/http"
"github.com/superseriousbusiness/gotosocial/internal/api"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/router"
)
@@ -36,14 +35,12 @@ const (
// Module implements the FederationModule interface
type Module struct {
- config *config.Config
processor processing.Processor
}
// New returns a new nodeinfo module
-func New(config *config.Config, processor processing.Processor) api.FederationModule {
+func New(processor processing.Processor) api.FederationModule {
return &Module{
- config: config,
processor: processor,
}
}
diff --git a/internal/api/s2s/user/inboxpost_test.go b/internal/api/s2s/user/inboxpost_test.go
index 4d1c05757..716dcfc25 100644
--- a/internal/api/s2s/user/inboxpost_test.go
+++ b/internal/api/s2s/user/inboxpost_test.go
@@ -87,7 +87,7 @@ func (suite *InboxPostTestSuite) TestPostBlock() {
federator := testrig.NewTestFederator(suite.db, tc, suite.storage)
emailSender := testrig.NewEmailSender("../../../../web/template/", nil)
processor := testrig.NewTestProcessor(suite.db, suite.storage, federator, emailSender)
- userModule := user.New(suite.config, processor).(*user.Module)
+ userModule := user.New(processor).(*user.Module)
// setup request
recorder := httptest.NewRecorder()
@@ -187,7 +187,7 @@ func (suite *InboxPostTestSuite) TestPostUnblock() {
federator := testrig.NewTestFederator(suite.db, tc, suite.storage)
emailSender := testrig.NewEmailSender("../../../../web/template/", nil)
processor := testrig.NewTestProcessor(suite.db, suite.storage, federator, emailSender)
- userModule := user.New(suite.config, processor).(*user.Module)
+ userModule := user.New(processor).(*user.Module)
// setup request
recorder := httptest.NewRecorder()
@@ -277,7 +277,7 @@ func (suite *InboxPostTestSuite) TestPostUpdate() {
federator := testrig.NewTestFederator(suite.db, tc, suite.storage)
emailSender := testrig.NewEmailSender("../../../../web/template/", nil)
processor := testrig.NewTestProcessor(suite.db, suite.storage, federator, emailSender)
- userModule := user.New(suite.config, processor).(*user.Module)
+ userModule := user.New(processor).(*user.Module)
// setup request
recorder := httptest.NewRecorder()
@@ -398,7 +398,7 @@ func (suite *InboxPostTestSuite) TestPostDelete() {
processor := testrig.NewTestProcessor(suite.db, suite.storage, federator, emailSender)
err = processor.Start(context.Background())
suite.NoError(err)
- userModule := user.New(suite.config, processor).(*user.Module)
+ userModule := user.New(processor).(*user.Module)
// setup request
recorder := httptest.NewRecorder()
diff --git a/internal/api/s2s/user/outboxget_test.go b/internal/api/s2s/user/outboxget_test.go
index 251051cbd..4cd556bbe 100644
--- a/internal/api/s2s/user/outboxget_test.go
+++ b/internal/api/s2s/user/outboxget_test.go
@@ -48,7 +48,7 @@ func (suite *OutboxGetTestSuite) TestGetOutbox() {
federator := testrig.NewTestFederator(suite.db, tc, suite.storage)
emailSender := testrig.NewEmailSender("../../../../web/template/", nil)
processor := testrig.NewTestProcessor(suite.db, suite.storage, federator, emailSender)
- userModule := user.New(suite.config, processor).(*user.Module)
+ userModule := user.New(processor).(*user.Module)
// setup request
recorder := httptest.NewRecorder()
@@ -102,7 +102,7 @@ func (suite *OutboxGetTestSuite) TestGetOutboxFirstPage() {
federator := testrig.NewTestFederator(suite.db, tc, suite.storage)
emailSender := testrig.NewEmailSender("../../../../web/template/", nil)
processor := testrig.NewTestProcessor(suite.db, suite.storage, federator, emailSender)
- userModule := user.New(suite.config, processor).(*user.Module)
+ userModule := user.New(processor).(*user.Module)
// setup request
recorder := httptest.NewRecorder()
@@ -156,7 +156,7 @@ func (suite *OutboxGetTestSuite) TestGetOutboxNextPage() {
federator := testrig.NewTestFederator(suite.db, tc, suite.storage)
emailSender := testrig.NewEmailSender("../../../../web/template/", nil)
processor := testrig.NewTestProcessor(suite.db, suite.storage, federator, emailSender)
- userModule := user.New(suite.config, processor).(*user.Module)
+ userModule := user.New(processor).(*user.Module)
// setup request
recorder := httptest.NewRecorder()
diff --git a/internal/api/s2s/user/repliesget_test.go b/internal/api/s2s/user/repliesget_test.go
index cd1094b53..a4229bb21 100644
--- a/internal/api/s2s/user/repliesget_test.go
+++ b/internal/api/s2s/user/repliesget_test.go
@@ -51,7 +51,7 @@ func (suite *RepliesGetTestSuite) TestGetReplies() {
federator := testrig.NewTestFederator(suite.db, tc, suite.storage)
emailSender := testrig.NewEmailSender("../../../../web/template/", nil)
processor := testrig.NewTestProcessor(suite.db, suite.storage, federator, emailSender)
- userModule := user.New(suite.config, processor).(*user.Module)
+ userModule := user.New(processor).(*user.Module)
// setup request
recorder := httptest.NewRecorder()
@@ -111,7 +111,7 @@ func (suite *RepliesGetTestSuite) TestGetRepliesNext() {
federator := testrig.NewTestFederator(suite.db, tc, suite.storage)
emailSender := testrig.NewEmailSender("../../../../web/template/", nil)
processor := testrig.NewTestProcessor(suite.db, suite.storage, federator, emailSender)
- userModule := user.New(suite.config, processor).(*user.Module)
+ userModule := user.New(processor).(*user.Module)
// setup request
recorder := httptest.NewRecorder()
@@ -174,7 +174,7 @@ func (suite *RepliesGetTestSuite) TestGetRepliesLast() {
federator := testrig.NewTestFederator(suite.db, tc, suite.storage)
emailSender := testrig.NewEmailSender("../../../../web/template/", nil)
processor := testrig.NewTestProcessor(suite.db, suite.storage, federator, emailSender)
- userModule := user.New(suite.config, processor).(*user.Module)
+ userModule := user.New(processor).(*user.Module)
// setup request
recorder := httptest.NewRecorder()
diff --git a/internal/api/s2s/user/user.go b/internal/api/s2s/user/user.go
index 56b940f1d..891f2c1b1 100644
--- a/internal/api/s2s/user/user.go
+++ b/internal/api/s2s/user/user.go
@@ -22,7 +22,6 @@ import (
"net/http"
"github.com/superseriousbusiness/gotosocial/internal/api"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/router"
"github.com/superseriousbusiness/gotosocial/internal/util"
@@ -66,14 +65,12 @@ const (
// Module implements the FederationAPIModule interface
type Module struct {
- config *config.Config
processor processing.Processor
}
// New returns a new user module
-func New(config *config.Config, processor processing.Processor) api.FederationModule {
+func New(processor processing.Processor) api.FederationModule {
return &Module{
- config: config,
processor: processor,
}
}
diff --git a/internal/api/s2s/user/user_test.go b/internal/api/s2s/user/user_test.go
index da305488f..ed6a83312 100644
--- a/internal/api/s2s/user/user_test.go
+++ b/internal/api/s2s/user/user_test.go
@@ -23,7 +23,6 @@ import (
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/api/s2s/user"
"github.com/superseriousbusiness/gotosocial/internal/api/security"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
@@ -37,7 +36,6 @@ import (
type UserStandardTestSuite struct {
// standard suite interfaces
suite.Suite
- config *config.Config
db db.DB
tc typeutils.TypeConverter
federator federation.Federator
@@ -73,17 +71,18 @@ func (suite *UserStandardTestSuite) SetupSuite() {
}
func (suite *UserStandardTestSuite) SetupTest() {
- suite.config = testrig.NewTestConfig()
+ testrig.InitTestLog()
+ testrig.InitTestConfig()
+
suite.db = testrig.NewTestDB()
suite.tc = testrig.NewTestTypeConverter(suite.db)
suite.storage = testrig.NewTestStorage()
- testrig.InitTestLog()
suite.federator = testrig.NewTestFederator(suite.db, testrig.NewTestTransportController(testrig.NewMockHTTPClient(nil), suite.db), suite.storage)
suite.emailSender = testrig.NewEmailSender("../../../../web/template/", nil)
suite.processor = testrig.NewTestProcessor(suite.db, suite.storage, suite.federator, suite.emailSender)
- suite.userModule = user.New(suite.config, suite.processor).(*user.Module)
+ suite.userModule = user.New(suite.processor).(*user.Module)
suite.oauthServer = testrig.NewTestOauthServer(suite.db)
- suite.securityModule = security.New(suite.config, suite.db, suite.oauthServer).(*security.Module)
+ suite.securityModule = security.New(suite.db, suite.oauthServer).(*security.Module)
testrig.StandardDBSetup(suite.db, suite.testAccounts)
testrig.StandardStorageSetup(suite.storage, "../../../../testrig/media")
}
diff --git a/internal/api/s2s/user/userget_test.go b/internal/api/s2s/user/userget_test.go
index 303295cfe..8ad4a8151 100644
--- a/internal/api/s2s/user/userget_test.go
+++ b/internal/api/s2s/user/userget_test.go
@@ -49,7 +49,7 @@ func (suite *UserGetTestSuite) TestGetUser() {
federator := testrig.NewTestFederator(suite.db, tc, suite.storage)
emailSender := testrig.NewEmailSender("../../../../web/template/", nil)
processor := testrig.NewTestProcessor(suite.db, suite.storage, federator, emailSender)
- userModule := user.New(suite.config, processor).(*user.Module)
+ userModule := user.New(processor).(*user.Module)
// setup request
recorder := httptest.NewRecorder()
diff --git a/internal/api/s2s/webfinger/webfinger.go b/internal/api/s2s/webfinger/webfinger.go
index c447d730e..2ad77d56f 100644
--- a/internal/api/s2s/webfinger/webfinger.go
+++ b/internal/api/s2s/webfinger/webfinger.go
@@ -22,7 +22,6 @@ import (
"net/http"
"github.com/superseriousbusiness/gotosocial/internal/api"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/router"
)
@@ -34,14 +33,12 @@ const (
// Module implements the FederationModule interface
type Module struct {
- config *config.Config
processor processing.Processor
}
// New returns a new webfinger module
-func New(config *config.Config, processor processing.Processor) api.FederationModule {
+func New(processor processing.Processor) api.FederationModule {
return &Module{
- config: config,
processor: processor,
}
}
diff --git a/internal/api/s2s/webfinger/webfinger_test.go b/internal/api/s2s/webfinger/webfinger_test.go
index 679dbcd5d..f2e6d0659 100644
--- a/internal/api/s2s/webfinger/webfinger_test.go
+++ b/internal/api/s2s/webfinger/webfinger_test.go
@@ -28,7 +28,6 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/ap"
"github.com/superseriousbusiness/gotosocial/internal/api/s2s/webfinger"
"github.com/superseriousbusiness/gotosocial/internal/api/security"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
@@ -42,7 +41,6 @@ import (
type WebfingerStandardTestSuite struct {
// standard suite interfaces
suite.Suite
- config *config.Config
db db.DB
tc typeutils.TypeConverter
federator federation.Federator
@@ -76,17 +74,18 @@ func (suite *WebfingerStandardTestSuite) SetupSuite() {
}
func (suite *WebfingerStandardTestSuite) SetupTest() {
- suite.config = testrig.NewTestConfig()
+ testrig.InitTestLog()
+ testrig.InitTestConfig()
+
suite.db = testrig.NewTestDB()
suite.tc = testrig.NewTestTypeConverter(suite.db)
suite.storage = testrig.NewTestStorage()
- testrig.InitTestLog()
suite.federator = testrig.NewTestFederator(suite.db, testrig.NewTestTransportController(testrig.NewMockHTTPClient(nil), suite.db), suite.storage)
suite.emailSender = testrig.NewEmailSender("../../../../web/template/", nil)
suite.processor = testrig.NewTestProcessor(suite.db, suite.storage, suite.federator, suite.emailSender)
- suite.webfingerModule = webfinger.New(suite.config, suite.processor).(*webfinger.Module)
+ suite.webfingerModule = webfinger.New(suite.processor).(*webfinger.Module)
suite.oauthServer = testrig.NewTestOauthServer(suite.db)
- suite.securityModule = security.New(suite.config, suite.db, suite.oauthServer).(*security.Module)
+ suite.securityModule = security.New(suite.db, suite.oauthServer).(*security.Module)
testrig.StandardDBSetup(suite.db, suite.testAccounts)
testrig.StandardStorageSetup(suite.storage, "../../../../testrig/media")
}
diff --git a/internal/api/s2s/webfinger/webfingerget.go b/internal/api/s2s/webfinger/webfingerget.go
index 3552394f2..b7f3c714d 100644
--- a/internal/api/s2s/webfinger/webfingerget.go
+++ b/internal/api/s2s/webfinger/webfingerget.go
@@ -26,6 +26,8 @@ import (
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/util"
)
@@ -59,16 +61,19 @@ func (m *Module) WebfingerGETRequest(c *gin.Context) {
}
username := strings.ToLower(usernameAndAccountDomain[0])
- accountDomain := strings.ToLower(usernameAndAccountDomain[1])
- if username == "" || accountDomain == "" {
+ requestedAccountDomain := strings.ToLower(usernameAndAccountDomain[1])
+ if username == "" || requestedAccountDomain == "" {
l.Debug("aborting request because username or domain was empty")
c.JSON(http.StatusBadRequest, gin.H{"error": "bad request"})
return
}
- if accountDomain != m.config.AccountDomain && accountDomain != m.config.Host {
- l.Debugf("aborting request because accountDomain %s does not belong to this instance", accountDomain)
- c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("accountDomain %s does not belong to this instance", accountDomain)})
+ accountDomain := viper.GetString(config.Keys.AccountDomain)
+ host := viper.GetString(config.Keys.Host)
+
+ if requestedAccountDomain != accountDomain && requestedAccountDomain != host {
+ l.Debugf("aborting request because accountDomain %s does not belong to this instance", requestedAccountDomain)
+ c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("accountDomain %s does not belong to this instance", requestedAccountDomain)})
return
}
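
The webfinger handler keeps its logic but renames the request-derived value to requestedAccountDomain so it can't be confused with the configured account domain pulled from viper. The comparison reduces to something like this sketch (keys illustrative):

    package main

    import (
    	"fmt"
    	"strings"

    	"github.com/spf13/viper"
    )

    // belongsToInstance reports whether the domain a client asked about matches
    // either the configured account domain or the configured host.
    func belongsToInstance(requested string) bool {
    	requested = strings.ToLower(requested)
    	return requested == viper.GetString("account-domain") ||
    		requested == viper.GetString("host")
    }

    func main() {
    	viper.Set("host", "gts.example.org")
    	viper.Set("account-domain", "example.org")
    	fmt.Println(belongsToInstance("example.org"))     // true
    	fmt.Println(belongsToInstance("gts.example.org")) // true
    	fmt.Println(belongsToInstance("somewhere.else"))  // false
    }
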
diff --git a/internal/api/s2s/webfinger/webfingerget_test.go b/internal/api/s2s/webfinger/webfingerget_test.go
index 19c637929..8314972d6 100644
--- a/internal/api/s2s/webfinger/webfingerget_test.go
+++ b/internal/api/s2s/webfinger/webfingerget_test.go
@@ -27,9 +27,11 @@ import (
"testing"
"github.com/gin-gonic/gin"
+ "github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/api/s2s/webfinger"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -42,7 +44,8 @@ func (suite *WebfingerGetTestSuite) TestFingerUser() {
targetAccount := suite.testAccounts["local_account_1"]
// setup request
- requestPath := fmt.Sprintf("/%s?resource=acct:%s@%s", webfinger.WebfingerBasePath, targetAccount.Username, suite.config.Host)
+ host := viper.GetString(config.Keys.Host)
+ requestPath := fmt.Sprintf("/%s?resource=acct:%s@%s", webfinger.WebfingerBasePath, targetAccount.Username, host)
recorder := httptest.NewRecorder()
ctx, _ := gin.CreateTestContext(recorder)
@@ -63,10 +66,10 @@ func (suite *WebfingerGetTestSuite) TestFingerUser() {
}
func (suite *WebfingerGetTestSuite) TestFingerUserWithDifferentAccountDomainByHost() {
- suite.config.Host = "gts.example.org"
- suite.config.AccountDomain = "example.org"
- suite.processor = processing.NewProcessor(suite.config, suite.tc, suite.federator, testrig.NewTestOauthServer(suite.db), testrig.NewTestMediaHandler(suite.db, suite.storage), suite.storage, testrig.NewTestTimelineManager(suite.db), suite.db, suite.emailSender)
- suite.webfingerModule = webfinger.New(suite.config, suite.processor).(*webfinger.Module)
+ viper.Set(config.Keys.Host, "gts.example.org")
+ viper.Set(config.Keys.AccountDomain, "example.org")
+ suite.processor = processing.NewProcessor(suite.tc, suite.federator, testrig.NewTestOauthServer(suite.db), testrig.NewTestMediaHandler(suite.db, suite.storage), suite.storage, testrig.NewTestTimelineManager(suite.db), suite.db, suite.emailSender)
+ suite.webfingerModule = webfinger.New(suite.processor).(*webfinger.Module)
targetAccount := accountDomainAccount()
if err := suite.db.Put(context.Background(), targetAccount); err != nil {
@@ -74,7 +77,8 @@ func (suite *WebfingerGetTestSuite) TestFingerUserWithDifferentAccountDomainByHo
}
// setup request
- requestPath := fmt.Sprintf("/%s?resource=acct:%s@%s", webfinger.WebfingerBasePath, targetAccount.Username, suite.config.Host)
+ host := viper.GetString(config.Keys.Host)
+ requestPath := fmt.Sprintf("/%s?resource=acct:%s@%s", webfinger.WebfingerBasePath, targetAccount.Username, host)
recorder := httptest.NewRecorder()
ctx, _ := gin.CreateTestContext(recorder)
@@ -95,10 +99,10 @@ func (suite *WebfingerGetTestSuite) TestFingerUserWithDifferentAccountDomainByHo
}
func (suite *WebfingerGetTestSuite) TestFingerUserWithDifferentAccountDomainByAccountDomain() {
- suite.config.Host = "gts.example.org"
- suite.config.AccountDomain = "example.org"
- suite.processor = processing.NewProcessor(suite.config, suite.tc, suite.federator, testrig.NewTestOauthServer(suite.db), testrig.NewTestMediaHandler(suite.db, suite.storage), suite.storage, testrig.NewTestTimelineManager(suite.db), suite.db, suite.emailSender)
- suite.webfingerModule = webfinger.New(suite.config, suite.processor).(*webfinger.Module)
+ viper.Set(config.Keys.Host, "gts.example.org")
+ viper.Set(config.Keys.AccountDomain, "example.org")
+ suite.processor = processing.NewProcessor(suite.tc, suite.federator, testrig.NewTestOauthServer(suite.db), testrig.NewTestMediaHandler(suite.db, suite.storage), suite.storage, testrig.NewTestTimelineManager(suite.db), suite.db, suite.emailSender)
+ suite.webfingerModule = webfinger.New(suite.processor).(*webfinger.Module)
targetAccount := accountDomainAccount()
if err := suite.db.Put(context.Background(), targetAccount); err != nil {
@@ -106,7 +110,8 @@ func (suite *WebfingerGetTestSuite) TestFingerUserWithDifferentAccountDomainByAc
}
// setup request
- requestPath := fmt.Sprintf("/%s?resource=acct:%s@%s", webfinger.WebfingerBasePath, targetAccount.Username, suite.config.AccountDomain)
+ accountDomain := viper.GetString(config.Keys.AccountDomain)
+ requestPath := fmt.Sprintf("/%s?resource=acct:%s@%s", webfinger.WebfingerBasePath, targetAccount.Username, accountDomain)
recorder := httptest.NewRecorder()
ctx, _ := gin.CreateTestContext(recorder)
@@ -130,7 +135,8 @@ func (suite *WebfingerGetTestSuite) TestFingerUserWithoutAcct() {
targetAccount := suite.testAccounts["local_account_1"]
// setup request -- leave out the 'acct:' prefix, which is pretty much what pixelfed currently does
- requestPath := fmt.Sprintf("/%s?resource=%s@%s", webfinger.WebfingerBasePath, targetAccount.Username, suite.config.Host)
+ host := viper.GetString(config.Keys.Host)
+ requestPath := fmt.Sprintf("/%s?resource=%s@%s", webfinger.WebfingerBasePath, targetAccount.Username, host)
recorder := httptest.NewRecorder()
ctx, _ := gin.CreateTestContext(recorder)
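The pattern these test changes follow recurs throughout the patch: tests write overrides straight into the global viper store and the code under test reads them back through the shared key names. A minimal standalone sketch of that pattern (package name and values below are illustrative, not taken from the patch):

package example_test

import (
	"testing"

	"github.com/spf13/viper"
	"github.com/superseriousbusiness/gotosocial/internal/config"
)

// TestHostOverride writes config overrides into viper, then reads them
// back via the shared key names, as the webfinger tests above do.
func TestHostOverride(t *testing.T) {
	viper.Set(config.Keys.Host, "gts.example.org")
	viper.Set(config.Keys.AccountDomain, "example.org")

	if host := viper.GetString(config.Keys.Host); host != "gts.example.org" {
		t.Fatalf("unexpected host %s", host)
	}
}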
diff --git a/internal/api/security/security.go b/internal/api/security/security.go
index 0379b2d53..1e55d5bc5 100644
--- a/internal/api/security/security.go
+++ b/internal/api/security/security.go
@@ -22,7 +22,6 @@ import (
"net/http"
"github.com/superseriousbusiness/gotosocial/internal/api"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
"github.com/superseriousbusiness/gotosocial/internal/router"
@@ -32,15 +31,13 @@ const robotsPath = "/robots.txt"
// Module implements the ClientAPIModule interface for security middleware
type Module struct {
- config *config.Config
db db.DB
server oauth.Server
}
// New returns a new security module
-func New(config *config.Config, db db.DB, server oauth.Server) api.ClientModule {
+func New(db db.DB, server oauth.Server) api.ClientModule {
return &Module{
- config: config,
db: db,
server: server,
}
diff --git a/internal/config/config.go b/internal/config/config.go
deleted file mode 100644
index eb9e6385b..000000000
--- a/internal/config/config.go
+++ /dev/null
@@ -1,622 +0,0 @@
-/*
- GoToSocial
- Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-package config
-
-import (
- "errors"
- "fmt"
- "os"
-
- "gopkg.in/yaml.v2"
-)
-
-// Flags and usage strings for configuration.
-const (
- UsernameFlag = "username"
- UsernameUsage = "the username to create/delete/etc"
-
- EmailFlag = "email"
- EmailUsage = "the email address of this account"
-
- PasswordFlag = "password"
- PasswordUsage = "the password to set for this account"
-
- TransPathFlag = "path"
- TransPathUsage = "the path of the file to import from/export to"
-)
-
-// Config pulls together all the configuration needed to run gotosocial
-type Config struct {
- /*
- Parseable from .yaml configuration file.
- For long-running commands (server start etc).
- */
-
- LogLevel string `yaml:"logLevel"`
- ApplicationName string `yaml:"applicationName"`
- Host string `yaml:"host"`
- AccountDomain string `yaml:"accountDomain"`
- Protocol string `yaml:"protocol"`
- BindAddress string `yaml:"bindAddress"`
- Port int `yaml:"port"`
- TrustedProxies []string `yaml:"trustedProxies"`
- DBConfig *DBConfig `yaml:"db"`
- TemplateConfig *TemplateConfig `yaml:"template"`
- AccountsConfig *AccountsConfig `yaml:"accounts"`
- MediaConfig *MediaConfig `yaml:"media"`
- StorageConfig *StorageConfig `yaml:"storage"`
- StatusesConfig *StatusesConfig `yaml:"statuses"`
- LetsEncryptConfig *LetsEncryptConfig `yaml:"letsEncrypt"`
- OIDCConfig *OIDCConfig `yaml:"oidc"`
- SMTPConfig *SMTPConfig `yaml:"smtp"`
-
- /*
- Not parsed from .yaml configuration file.
- */
- AccountCLIFlags map[string]string
- ExportCLIFlags map[string]string
- SoftwareVersion string
-}
-
-// FromFile returns a new config from a file, or an error if something goes amiss.
-func FromFile(path string) (*Config, error) {
- if path != "" {
- c, err := loadFromFile(path)
- if err != nil {
- return nil, fmt.Errorf("error creating config: %s", err)
- }
- return c, nil
- }
- return Default(), nil
-}
-
-// loadFromFile takes a path to a yaml file and attempts to load a Config object from it
-func loadFromFile(path string) (*Config, error) {
- bytes, err := os.ReadFile(path)
- if err != nil {
- return nil, fmt.Errorf("could not read file at path %s: %s", path, err)
- }
-
- config := Default()
- if err := yaml.Unmarshal(bytes, config); err != nil {
- return nil, fmt.Errorf("could not unmarshal file at path %s: %s", path, err)
- }
-
- return config, nil
-}
-
-// ParseCLIFlags sets flags on the config using the provided Flags object
-func (c *Config) ParseCLIFlags(f KeyedFlags, version string) error {
- fn := GetFlagNames()
-
- // For all of these flags, we only want to set them on the config if:
- //
- // a) They haven't been set at all in the config file we already parsed,
- // and so we take the default from the flags object.
- //
- // b) They may have been set in the config, but they've *also* been set explicitly
- // as a command-line argument or an env variable, which takes priority.
-
- // general flags
- if f.IsSet(fn.LogLevel) {
- c.LogLevel = f.String(fn.LogLevel)
- }
-
- if f.IsSet(fn.ApplicationName) {
- c.ApplicationName = f.String(fn.ApplicationName)
- }
-
- if f.IsSet(fn.Host) {
- c.Host = f.String(fn.Host)
- }
- if c.Host == "" {
- return errors.New("host was not set")
- }
-
- if f.IsSet(fn.AccountDomain) {
- c.AccountDomain = f.String(fn.AccountDomain)
- }
- if c.AccountDomain == "" {
- c.AccountDomain = c.Host // default to whatever the host is, if this is empty
- }
-
- if f.IsSet(fn.Protocol) {
- c.Protocol = f.String(fn.Protocol)
- }
- if c.Protocol == "" {
- return errors.New("protocol was not set")
- }
-
- if f.IsSet(fn.BindAddress) {
- c.BindAddress = f.String(fn.BindAddress)
- }
-
- if f.IsSet(fn.Port) {
- c.Port = f.Int(fn.Port)
- }
-
- if f.IsSet(fn.TrustedProxies) {
- c.TrustedProxies = f.StringSlice(fn.TrustedProxies)
- }
-
- // db flags
- if f.IsSet(fn.DbType) {
- c.DBConfig.Type = f.String(fn.DbType)
- }
-
- if f.IsSet(fn.DbAddress) {
- c.DBConfig.Address = f.String(fn.DbAddress)
- }
-
- if f.IsSet(fn.DbPort) {
- c.DBConfig.Port = f.Int(fn.DbPort)
- }
-
- if f.IsSet(fn.DbUser) {
- c.DBConfig.User = f.String(fn.DbUser)
- }
-
- if f.IsSet(fn.DbPassword) {
- c.DBConfig.Password = f.String(fn.DbPassword)
- }
-
- if f.IsSet(fn.DbDatabase) {
- c.DBConfig.Database = f.String(fn.DbDatabase)
- }
-
- if f.IsSet(fn.DbTLSMode) {
- c.DBConfig.TLSMode = DBTLSMode(f.String(fn.DbTLSMode))
- }
-
- if f.IsSet(fn.DbTLSCACert) {
- c.DBConfig.TLSCACert = f.String(fn.DbTLSCACert)
- }
-
- // template flags
- if f.IsSet(fn.TemplateBaseDir) {
- c.TemplateConfig.BaseDir = f.String(fn.TemplateBaseDir)
- }
-
- // template flags
- if f.IsSet(fn.AssetBaseDir) {
- c.TemplateConfig.AssetBaseDir = f.String(fn.AssetBaseDir)
- }
-
- // accounts flags
- if f.IsSet(fn.AccountsOpenRegistration) {
- c.AccountsConfig.OpenRegistration = f.Bool(fn.AccountsOpenRegistration)
- }
-
- if f.IsSet(fn.AccountsApprovalRequired) {
- c.AccountsConfig.RequireApproval = f.Bool(fn.AccountsApprovalRequired)
- }
-
- // media flags
- if f.IsSet(fn.MediaMaxImageSize) {
- c.MediaConfig.MaxImageSize = f.Int(fn.MediaMaxImageSize)
- }
-
- if f.IsSet(fn.MediaMaxVideoSize) {
- c.MediaConfig.MaxVideoSize = f.Int(fn.MediaMaxVideoSize)
- }
-
- if f.IsSet(fn.MediaMinDescriptionChars) {
- c.MediaConfig.MinDescriptionChars = f.Int(fn.MediaMinDescriptionChars)
- }
-
- if f.IsSet(fn.MediaMaxDescriptionChars) {
- c.MediaConfig.MaxDescriptionChars = f.Int(fn.MediaMaxDescriptionChars)
- }
-
- // storage flags
- if f.IsSet(fn.StorageBackend) {
- c.StorageConfig.Backend = f.String(fn.StorageBackend)
- }
-
- if f.IsSet(fn.StorageBasePath) {
- c.StorageConfig.BasePath = f.String(fn.StorageBasePath)
- }
-
- if f.IsSet(fn.StorageServeProtocol) {
- c.StorageConfig.ServeProtocol = f.String(fn.StorageServeProtocol)
- }
-
- if f.IsSet(fn.StorageServeHost) {
- c.StorageConfig.ServeHost = f.String(fn.StorageServeHost)
- }
-
- if f.IsSet(fn.StorageServeBasePath) {
- c.StorageConfig.ServeBasePath = f.String(fn.StorageServeBasePath)
- }
-
- // statuses flags
- if f.IsSet(fn.StatusesMaxChars) {
- c.StatusesConfig.MaxChars = f.Int(fn.StatusesMaxChars)
- }
- if f.IsSet(fn.StatusesCWMaxChars) {
- c.StatusesConfig.CWMaxChars = f.Int(fn.StatusesCWMaxChars)
- }
- if f.IsSet(fn.StatusesPollMaxOptions) {
- c.StatusesConfig.PollMaxOptions = f.Int(fn.StatusesPollMaxOptions)
- }
- if f.IsSet(fn.StatusesPollOptionMaxChars) {
- c.StatusesConfig.PollOptionMaxChars = f.Int(fn.StatusesPollOptionMaxChars)
- }
- if f.IsSet(fn.StatusesMaxMediaFiles) {
- c.StatusesConfig.MaxMediaFiles = f.Int(fn.StatusesMaxMediaFiles)
- }
-
- // letsencrypt flags
- if f.IsSet(fn.LetsEncryptEnabled) {
- c.LetsEncryptConfig.Enabled = f.Bool(fn.LetsEncryptEnabled)
- }
-
- if f.IsSet(fn.LetsEncryptPort) {
- c.LetsEncryptConfig.Port = f.Int(fn.LetsEncryptPort)
- }
-
- if f.IsSet(fn.LetsEncryptCertDir) {
- c.LetsEncryptConfig.CertDir = f.String(fn.LetsEncryptCertDir)
- }
-
- if f.IsSet(fn.LetsEncryptEmailAddress) {
- c.LetsEncryptConfig.EmailAddress = f.String(fn.LetsEncryptEmailAddress)
- }
-
- // OIDC flags
- if f.IsSet(fn.OIDCEnabled) {
- c.OIDCConfig.Enabled = f.Bool(fn.OIDCEnabled)
- }
-
- if f.IsSet(fn.OIDCIdpName) {
- c.OIDCConfig.IDPName = f.String(fn.OIDCIdpName)
- }
-
- if f.IsSet(fn.OIDCSkipVerification) {
- c.OIDCConfig.SkipVerification = f.Bool(fn.OIDCSkipVerification)
- }
-
- if f.IsSet(fn.OIDCIssuer) {
- c.OIDCConfig.Issuer = f.String(fn.OIDCIssuer)
- }
-
- if f.IsSet(fn.OIDCClientID) {
- c.OIDCConfig.ClientID = f.String(fn.OIDCClientID)
- }
-
- if f.IsSet(fn.OIDCClientSecret) {
- c.OIDCConfig.ClientSecret = f.String(fn.OIDCClientSecret)
- }
-
- if f.IsSet(fn.OIDCScopes) {
- c.OIDCConfig.Scopes = f.StringSlice(fn.OIDCScopes)
- }
-
- // smtp flags
- if f.IsSet(fn.SMTPHost) {
- c.SMTPConfig.Host = f.String(fn.SMTPHost)
- }
-
- if f.IsSet(fn.SMTPPort) {
- c.SMTPConfig.Port = f.Int(fn.SMTPPort)
- }
-
- if f.IsSet(fn.SMTPUsername) {
- c.SMTPConfig.Username = f.String(fn.SMTPUsername)
- }
-
- if f.IsSet(fn.SMTPPassword) {
- c.SMTPConfig.Password = f.String(fn.SMTPPassword)
- }
-
- if f.IsSet(fn.SMTPFrom) {
- c.SMTPConfig.From = f.String(fn.SMTPFrom)
- }
-
- // command-specific flags
-
- // admin account CLI flags
- c.AccountCLIFlags[UsernameFlag] = f.String(UsernameFlag)
- c.AccountCLIFlags[EmailFlag] = f.String(EmailFlag)
- c.AccountCLIFlags[PasswordFlag] = f.String(PasswordFlag)
-
- // export CLI flags
- c.ExportCLIFlags[TransPathFlag] = f.String(TransPathFlag)
-
- c.SoftwareVersion = version
- return nil
-}
-
-// KeyedFlags is a wrapper for any type that can store keyed flags and give them back.
-// HINT: This works with a urfave cli context struct ;)
-type KeyedFlags interface {
- Bool(k string) bool
- String(k string) string
- StringSlice(k string) []string
- Int(k string) int
- IsSet(k string) bool
-}
-
-// Flags is used for storing the names of the various flags used for
-// initializing and storing urfavecli flag variables.
-type Flags struct {
- LogLevel string
- ApplicationName string
- ConfigPath string
- Host string
- AccountDomain string
- Protocol string
- BindAddress string
- Port string
- TrustedProxies string
-
- DbType string
- DbAddress string
- DbPort string
- DbUser string
- DbPassword string
- DbDatabase string
- DbTLSMode string
- DbTLSCACert string
-
- TemplateBaseDir string
- AssetBaseDir string
-
- AccountsOpenRegistration string
- AccountsApprovalRequired string
- AccountsReasonRequired string
-
- MediaMaxImageSize string
- MediaMaxVideoSize string
- MediaMinDescriptionChars string
- MediaMaxDescriptionChars string
-
- StorageBackend string
- StorageBasePath string
- StorageServeProtocol string
- StorageServeHost string
- StorageServeBasePath string
-
- StatusesMaxChars string
- StatusesCWMaxChars string
- StatusesPollMaxOptions string
- StatusesPollOptionMaxChars string
- StatusesMaxMediaFiles string
-
- LetsEncryptEnabled string
- LetsEncryptCertDir string
- LetsEncryptEmailAddress string
- LetsEncryptPort string
-
- OIDCEnabled string
- OIDCIdpName string
- OIDCSkipVerification string
- OIDCIssuer string
- OIDCClientID string
- OIDCClientSecret string
- OIDCScopes string
-
- SMTPHost string
- SMTPPort string
- SMTPUsername string
- SMTPPassword string
- SMTPFrom string
-}
-
-// Defaults contains all the default values for a gotosocial config
-type Defaults struct {
- LogLevel string
- ApplicationName string
- ConfigPath string
- Host string
- AccountDomain string
- Protocol string
- BindAddress string
- Port int
- TrustedProxies []string
- SoftwareVersion string
-
- DbType string
- DbAddress string
- DbPort int
- DbUser string
- DbPassword string
- DbDatabase string
- DBTlsMode string
- DBTlsCACert string
-
- TemplateBaseDir string
- AssetBaseDir string
-
- AccountsOpenRegistration bool
- AccountsRequireApproval bool
- AccountsReasonRequired bool
-
- MediaMaxImageSize int
- MediaMaxVideoSize int
- MediaMinDescriptionChars int
- MediaMaxDescriptionChars int
-
- StorageBackend string
- StorageBasePath string
- StorageServeProtocol string
- StorageServeHost string
- StorageServeBasePath string
-
- StatusesMaxChars int
- StatusesCWMaxChars int
- StatusesPollMaxOptions int
- StatusesPollOptionMaxChars int
- StatusesMaxMediaFiles int
-
- LetsEncryptEnabled bool
- LetsEncryptCertDir string
- LetsEncryptEmailAddress string
- LetsEncryptPort int
-
- OIDCEnabled bool
- OIDCIdpName string
- OIDCSkipVerification bool
- OIDCIssuer string
- OIDCClientID string
- OIDCClientSecret string
- OIDCScopes []string
-
- SMTPHost string
- SMTPPort int
- SMTPUsername string
- SMTPPassword string
- SMTPFrom string
-}
-
-// GetFlagNames returns a struct containing the names of the various flags used for
-// initializing and storing urfavecli flag variables.
-func GetFlagNames() Flags {
- return Flags{
- LogLevel: "log-level",
- ApplicationName: "application-name",
- ConfigPath: "config-path",
- Host: "host",
- AccountDomain: "account-domain",
- Protocol: "protocol",
- BindAddress: "bind-address",
- Port: "port",
- TrustedProxies: "trusted-proxies",
-
- DbType: "db-type",
- DbAddress: "db-address",
- DbPort: "db-port",
- DbUser: "db-user",
- DbPassword: "db-password",
- DbDatabase: "db-database",
- DbTLSMode: "db-tls-mode",
- DbTLSCACert: "db-tls-ca-cert",
-
- TemplateBaseDir: "template-basedir",
- AssetBaseDir: "asset-basedir",
-
- AccountsOpenRegistration: "accounts-open-registration",
- AccountsApprovalRequired: "accounts-approval-required",
- AccountsReasonRequired: "accounts-reason-required",
-
- MediaMaxImageSize: "media-max-image-size",
- MediaMaxVideoSize: "media-max-video-size",
- MediaMinDescriptionChars: "media-min-description-chars",
- MediaMaxDescriptionChars: "media-max-description-chars",
-
- StorageBackend: "storage-backend",
- StorageBasePath: "storage-base-path",
- StorageServeProtocol: "storage-serve-protocol",
- StorageServeHost: "storage-serve-host",
- StorageServeBasePath: "storage-serve-base-path",
-
- StatusesMaxChars: "statuses-max-chars",
- StatusesCWMaxChars: "statuses-cw-max-chars",
- StatusesPollMaxOptions: "statuses-poll-max-options",
- StatusesPollOptionMaxChars: "statuses-poll-option-max-chars",
- StatusesMaxMediaFiles: "statuses-max-media-files",
-
- LetsEncryptEnabled: "letsencrypt-enabled",
- LetsEncryptPort: "letsencrypt-port",
- LetsEncryptCertDir: "letsencrypt-cert-dir",
- LetsEncryptEmailAddress: "letsencrypt-email",
-
- OIDCEnabled: "oidc-enabled",
- OIDCIdpName: "oidc-idp-name",
- OIDCSkipVerification: "oidc-skip-verification",
- OIDCIssuer: "oidc-issuer",
- OIDCClientID: "oidc-client-id",
- OIDCClientSecret: "oidc-client-secret",
- OIDCScopes: "oidc-scopes",
-
- SMTPHost: "smtp-host",
- SMTPPort: "smtp-port",
- SMTPUsername: "smtp-username",
- SMTPPassword: "smtp-password",
- SMTPFrom: "smtp-from",
- }
-}
-
-// GetEnvNames returns a struct containing the names of the environment variable keys used for
-// initializing and storing urfavecli flag variables.
-func GetEnvNames() Flags {
- return Flags{
- LogLevel: "GTS_LOG_LEVEL",
- ApplicationName: "GTS_APPLICATION_NAME",
- ConfigPath: "GTS_CONFIG_PATH",
- Host: "GTS_HOST",
- AccountDomain: "GTS_ACCOUNT_DOMAIN",
- Protocol: "GTS_PROTOCOL",
- BindAddress: "GTS_BIND_ADDRESS",
- Port: "GTS_PORT",
- TrustedProxies: "GTS_TRUSTED_PROXIES",
-
- DbType: "GTS_DB_TYPE",
- DbAddress: "GTS_DB_ADDRESS",
- DbPort: "GTS_DB_PORT",
- DbUser: "GTS_DB_USER",
- DbPassword: "GTS_DB_PASSWORD",
- DbDatabase: "GTS_DB_DATABASE",
- DbTLSMode: "GTS_DB_TLS_MODE",
- DbTLSCACert: "GTS_DB_CA_CERT",
-
- TemplateBaseDir: "GTS_TEMPLATE_BASEDIR",
- AssetBaseDir: "GTS_ASSET_BASEDIR",
-
- AccountsOpenRegistration: "GTS_ACCOUNTS_OPEN_REGISTRATION",
- AccountsApprovalRequired: "GTS_ACCOUNTS_APPROVAL_REQUIRED",
- AccountsReasonRequired: "GTS_ACCOUNTS_REASON_REQUIRED",
-
- MediaMaxImageSize: "GTS_MEDIA_MAX_IMAGE_SIZE",
- MediaMaxVideoSize: "GTS_MEDIA_MAX_VIDEO_SIZE",
- MediaMinDescriptionChars: "GTS_MEDIA_MIN_DESCRIPTION_CHARS",
- MediaMaxDescriptionChars: "GTS_MEDIA_MAX_DESCRIPTION_CHARS",
-
- StorageBackend: "GTS_STORAGE_BACKEND",
- StorageBasePath: "GTS_STORAGE_BASE_PATH",
- StorageServeProtocol: "GTS_STORAGE_SERVE_PROTOCOL",
- StorageServeHost: "GTS_STORAGE_SERVE_HOST",
- StorageServeBasePath: "GTS_STORAGE_SERVE_BASE_PATH",
-
- StatusesMaxChars: "GTS_STATUSES_MAX_CHARS",
- StatusesCWMaxChars: "GTS_STATUSES_CW_MAX_CHARS",
- StatusesPollMaxOptions: "GTS_STATUSES_POLL_MAX_OPTIONS",
- StatusesPollOptionMaxChars: "GTS_STATUSES_POLL_OPTION_MAX_CHARS",
- StatusesMaxMediaFiles: "GTS_STATUSES_MAX_MEDIA_FILES",
-
- LetsEncryptEnabled: "GTS_LETSENCRYPT_ENABLED",
- LetsEncryptPort: "GTS_LETSENCRYPT_PORT",
- LetsEncryptCertDir: "GTS_LETSENCRYPT_CERT_DIR",
- LetsEncryptEmailAddress: "GTS_LETSENCRYPT_EMAIL",
-
- OIDCEnabled: "GTS_OIDC_ENABLED",
- OIDCIdpName: "GTS_OIDC_IDP_NAME",
- OIDCSkipVerification: "GTS_OIDC_SKIP_VERIFICATION",
- OIDCIssuer: "GTS_OIDC_ISSUER",
- OIDCClientID: "GTS_OIDC_CLIENT_ID",
- OIDCClientSecret: "GTS_OIDC_CLIENT_SECRET",
- OIDCScopes: "GTS_OIDC_SCOPES",
-
- SMTPHost: "SMTP_HOST",
- SMTPPort: "SMTP_PORT",
- SMTPUsername: "SMTP_USERNAME",
- SMTPPassword: "SMTP_PASSWORD",
- SMTPFrom: "SMTP_FROM",
- }
-}
diff --git a/internal/config/db.go b/internal/config/db.go
deleted file mode 100644
index ffdc07521..000000000
--- a/internal/config/db.go
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- GoToSocial
- Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-package config
-
-// DBConfig provides configuration options for the database connection
-type DBConfig struct {
- Type string `yaml:"type"`
- Address string `yaml:"address"`
- Port int `yaml:"port"`
- User string `yaml:"user"`
- Password string `yaml:"password"`
- Database string `yaml:"database"`
- ApplicationName string `yaml:"applicationName"`
- TLSMode DBTLSMode `yaml:"tlsMode"`
- TLSCACert string `yaml:"tlsCACert"`
-}
-
-// DBTLSMode describes a mode of connecting to a database with or without TLS.
-type DBTLSMode string
-
-// DBTLSModeDisable does not attempt to make a TLS connection to the database.
-var DBTLSModeDisable DBTLSMode = "disable"
-
-// DBTLSModeEnable attempts to make a TLS connection to the database, but doesn't fail if
-// the certificate passed by the database isn't verified.
-var DBTLSModeEnable DBTLSMode = "enable"
-
-// DBTLSModeRequire attempts to make a TLS connection to the database, and requires
-// that the certificate presented by the database is valid.
-var DBTLSModeRequire DBTLSMode = "require"
-
-// DBTLSModeUnset means that the TLS mode has not been set.
-var DBTLSModeUnset DBTLSMode
diff --git a/internal/config/default.go b/internal/config/default.go
deleted file mode 100644
index 6e8f63177..000000000
--- a/internal/config/default.go
+++ /dev/null
@@ -1,289 +0,0 @@
-package config
-
-import "github.com/coreos/go-oidc/v3/oidc"
-
-// TestDefault returns a default config for testing
-func TestDefault() *Config {
- defaults := GetTestDefaults()
- return &Config{
- LogLevel: defaults.LogLevel,
- ApplicationName: defaults.ApplicationName,
- Host: defaults.Host,
- AccountDomain: defaults.AccountDomain,
- Protocol: defaults.Protocol,
- BindAddress: defaults.BindAddress,
- Port: defaults.Port,
- TrustedProxies: defaults.TrustedProxies,
- SoftwareVersion: defaults.SoftwareVersion,
- DBConfig: &DBConfig{
- Type: defaults.DbType,
- Address: defaults.DbAddress,
- Port: defaults.DbPort,
- User: defaults.DbUser,
- Password: defaults.DbPassword,
- Database: defaults.DbDatabase,
- ApplicationName: defaults.ApplicationName,
- },
- TemplateConfig: &TemplateConfig{
- BaseDir: defaults.TemplateBaseDir,
- AssetBaseDir: defaults.AssetBaseDir,
- },
- AccountsConfig: &AccountsConfig{
- OpenRegistration: defaults.AccountsOpenRegistration,
- RequireApproval: defaults.AccountsRequireApproval,
- ReasonRequired: defaults.AccountsReasonRequired,
- },
- MediaConfig: &MediaConfig{
- MaxImageSize: defaults.MediaMaxImageSize,
- MaxVideoSize: defaults.MediaMaxVideoSize,
- MinDescriptionChars: defaults.MediaMinDescriptionChars,
- MaxDescriptionChars: defaults.MediaMaxDescriptionChars,
- },
- StorageConfig: &StorageConfig{
- Backend: defaults.StorageBackend,
- BasePath: defaults.StorageBasePath,
- ServeProtocol: defaults.StorageServeProtocol,
- ServeHost: defaults.StorageServeHost,
- ServeBasePath: defaults.StorageServeBasePath,
- },
- StatusesConfig: &StatusesConfig{
- MaxChars: defaults.StatusesMaxChars,
- CWMaxChars: defaults.StatusesCWMaxChars,
- PollMaxOptions: defaults.StatusesPollMaxOptions,
- PollOptionMaxChars: defaults.StatusesPollOptionMaxChars,
- MaxMediaFiles: defaults.StatusesMaxMediaFiles,
- },
- LetsEncryptConfig: &LetsEncryptConfig{
- Enabled: defaults.LetsEncryptEnabled,
- Port: defaults.LetsEncryptPort,
- CertDir: defaults.LetsEncryptCertDir,
- EmailAddress: defaults.LetsEncryptEmailAddress,
- },
- OIDCConfig: &OIDCConfig{
- Enabled: defaults.OIDCEnabled,
- IDPName: defaults.OIDCIdpName,
- SkipVerification: defaults.OIDCSkipVerification,
- Issuer: defaults.OIDCIssuer,
- ClientID: defaults.OIDCClientID,
- ClientSecret: defaults.OIDCClientSecret,
- Scopes: defaults.OIDCScopes,
- },
- SMTPConfig: &SMTPConfig{
- Host: defaults.SMTPHost,
- Port: defaults.SMTPPort,
- Username: defaults.SMTPUsername,
- Password: defaults.SMTPPassword,
- From: defaults.SMTPFrom,
- },
- }
-}
-
-// Default returns a config with all default values set
-func Default() *Config {
- defaults := GetDefaults()
- return &Config{
- LogLevel: defaults.LogLevel,
- ApplicationName: defaults.ApplicationName,
- Host: defaults.Host,
- Protocol: defaults.Protocol,
- BindAddress: defaults.BindAddress,
- Port: defaults.Port,
- TrustedProxies: defaults.TrustedProxies,
- SoftwareVersion: defaults.SoftwareVersion,
- DBConfig: &DBConfig{
- Type: defaults.DbType,
- Address: defaults.DbAddress,
- Port: defaults.DbPort,
- User: defaults.DbUser,
- Password: defaults.DbPassword,
- Database: defaults.DbDatabase,
- ApplicationName: defaults.ApplicationName,
- },
- TemplateConfig: &TemplateConfig{
- BaseDir: defaults.TemplateBaseDir,
- AssetBaseDir: defaults.AssetBaseDir,
- },
- AccountsConfig: &AccountsConfig{
- OpenRegistration: defaults.AccountsOpenRegistration,
- RequireApproval: defaults.AccountsRequireApproval,
- ReasonRequired: defaults.AccountsReasonRequired,
- },
- MediaConfig: &MediaConfig{
- MaxImageSize: defaults.MediaMaxImageSize,
- MaxVideoSize: defaults.MediaMaxVideoSize,
- MinDescriptionChars: defaults.MediaMinDescriptionChars,
- MaxDescriptionChars: defaults.MediaMaxDescriptionChars,
- },
- StorageConfig: &StorageConfig{
- Backend: defaults.StorageBackend,
- BasePath: defaults.StorageBasePath,
- ServeProtocol: defaults.StorageServeProtocol,
- ServeHost: defaults.StorageServeHost,
- ServeBasePath: defaults.StorageServeBasePath,
- },
- StatusesConfig: &StatusesConfig{
- MaxChars: defaults.StatusesMaxChars,
- CWMaxChars: defaults.StatusesCWMaxChars,
- PollMaxOptions: defaults.StatusesPollMaxOptions,
- PollOptionMaxChars: defaults.StatusesPollOptionMaxChars,
- MaxMediaFiles: defaults.StatusesMaxMediaFiles,
- },
- LetsEncryptConfig: &LetsEncryptConfig{
- Enabled: defaults.LetsEncryptEnabled,
- Port: defaults.LetsEncryptPort,
- CertDir: defaults.LetsEncryptCertDir,
- EmailAddress: defaults.LetsEncryptEmailAddress,
- },
- OIDCConfig: &OIDCConfig{
- Enabled: defaults.OIDCEnabled,
- IDPName: defaults.OIDCIdpName,
- SkipVerification: defaults.OIDCSkipVerification,
- Issuer: defaults.OIDCIssuer,
- ClientID: defaults.OIDCClientID,
- ClientSecret: defaults.OIDCClientSecret,
- Scopes: defaults.OIDCScopes,
- },
- SMTPConfig: &SMTPConfig{
- Host: defaults.SMTPHost,
- Port: defaults.SMTPPort,
- Username: defaults.SMTPUsername,
- Password: defaults.SMTPPassword,
- From: defaults.SMTPFrom,
- },
- AccountCLIFlags: make(map[string]string),
- ExportCLIFlags: make(map[string]string),
- }
-}
-
-// GetDefaults returns a populated Defaults struct with most of the values set to reasonable defaults.
-// Note that if you use this function, you still need to set Host and, if desired, ConfigPath.
-func GetDefaults() Defaults {
- return Defaults{
- LogLevel: "info",
- ApplicationName: "gotosocial",
- ConfigPath: "",
- Host: "",
- AccountDomain: "",
- Protocol: "https",
- BindAddress: "0.0.0.0",
- Port: 8080,
- TrustedProxies: []string{"127.0.0.1/32"}, // localhost
-
- DbType: "postgres",
- DbAddress: "localhost",
- DbPort: 5432,
- DbUser: "postgres",
- DbPassword: "postgres",
- DbDatabase: "postgres",
- DBTlsMode: "disable",
- DBTlsCACert: "",
-
- TemplateBaseDir: "./web/template/",
- AssetBaseDir: "./web/assets/",
-
- AccountsOpenRegistration: true,
- AccountsRequireApproval: true,
- AccountsReasonRequired: true,
-
- MediaMaxImageSize: 2097152, // 2mb
- MediaMaxVideoSize: 10485760, // 10mb
- MediaMinDescriptionChars: 0,
- MediaMaxDescriptionChars: 500,
-
- StorageBackend: "local",
- StorageBasePath: "/gotosocial/storage",
- StorageServeProtocol: "https",
- StorageServeHost: "localhost",
- StorageServeBasePath: "/fileserver",
-
- StatusesMaxChars: 5000,
- StatusesCWMaxChars: 100,
- StatusesPollMaxOptions: 6,
- StatusesPollOptionMaxChars: 50,
- StatusesMaxMediaFiles: 6,
-
- LetsEncryptEnabled: true,
- LetsEncryptPort: 80,
- LetsEncryptCertDir: "/gotosocial/storage/certs",
- LetsEncryptEmailAddress: "",
-
- OIDCEnabled: false,
- OIDCIdpName: "",
- OIDCSkipVerification: false,
- OIDCIssuer: "",
- OIDCClientID: "",
- OIDCClientSecret: "",
- OIDCScopes: []string{oidc.ScopeOpenID, "profile", "email", "groups"},
-
- SMTPHost: "",
- SMTPPort: 0,
- SMTPUsername: "",
- SMTPPassword: "",
- SMTPFrom: "GoToSocial",
- }
-}
-
-// GetTestDefaults returns a Defaults struct with values set that are suitable for local testing.
-func GetTestDefaults() Defaults {
- return Defaults{
- LogLevel: "trace",
- ApplicationName: "gotosocial",
- ConfigPath: "",
- Host: "localhost:8080",
- AccountDomain: "localhost:8080",
- Protocol: "http",
- BindAddress: "127.0.0.1",
- Port: 8080,
- TrustedProxies: []string{"127.0.0.1/32"},
-
- DbType: "sqlite",
- DbAddress: ":memory:",
- DbPort: 5432,
- DbUser: "postgres",
- DbPassword: "postgres",
- DbDatabase: "postgres",
-
- TemplateBaseDir: "./web/template/",
- AssetBaseDir: "./web/assets/",
-
- AccountsOpenRegistration: true,
- AccountsRequireApproval: true,
- AccountsReasonRequired: true,
-
- MediaMaxImageSize: 1048576, // 1mb
- MediaMaxVideoSize: 5242880, // 5mb
- MediaMinDescriptionChars: 0,
- MediaMaxDescriptionChars: 500,
-
- StorageBackend: "local",
- StorageBasePath: "/gotosocial/storage",
- StorageServeProtocol: "http",
- StorageServeHost: "localhost:8080",
- StorageServeBasePath: "/fileserver",
-
- StatusesMaxChars: 5000,
- StatusesCWMaxChars: 100,
- StatusesPollMaxOptions: 6,
- StatusesPollOptionMaxChars: 50,
- StatusesMaxMediaFiles: 6,
-
- LetsEncryptEnabled: false,
- LetsEncryptPort: 0,
- LetsEncryptCertDir: "",
- LetsEncryptEmailAddress: "",
-
- OIDCEnabled: false,
- OIDCIdpName: "",
- OIDCSkipVerification: false,
- OIDCIssuer: "",
- OIDCClientID: "",
- OIDCClientSecret: "",
- OIDCScopes: []string{oidc.ScopeOpenID, "profile", "email", "groups"},
-
- SMTPHost: "",
- SMTPPort: 0,
- SMTPUsername: "",
- SMTPPassword: "",
- SMTPFrom: "GoToSocial",
- }
-}
diff --git a/internal/config/defaults.go b/internal/config/defaults.go
new file mode 100644
index 000000000..01eef8c8c
--- /dev/null
+++ b/internal/config/defaults.go
@@ -0,0 +1,87 @@
+/*
+ GoToSocial
+ Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package config
+
+import "github.com/coreos/go-oidc/v3/oidc"
+
+// Defaults contains a populated Values struct with most of the values set to reasonable defaults.
+// Note that if you use this, you still need to set Host and, if desired, ConfigPath.
+var Defaults = Values{
+ LogLevel: "info",
+ ApplicationName: "gotosocial",
+ ConfigPath: "",
+ Host: "",
+ AccountDomain: "",
+ Protocol: "https",
+ BindAddress: "0.0.0.0",
+ Port: 8080,
+ TrustedProxies: []string{"127.0.0.1/32"}, // localhost
+
+ DbType: "postgres",
+ DbAddress: "localhost",
+ DbPort: 5432,
+ DbUser: "postgres",
+ DbPassword: "postgres",
+ DbDatabase: "postgres",
+ DbTLSMode: "disable",
+ DbTLSCACert: "",
+
+ WebTemplateBaseDir: "./web/template/",
+ WebAssetBaseDir: "./web/assets/",
+
+ AccountsRegistrationOpen: true,
+ AccountsApprovalRequired: true,
+ AccountsReasonRequired: true,
+
+ MediaImageMaxSize: 2097152, // 2mb
+ MediaVideoMaxSize: 10485760, // 10mb
+ MediaDescriptionMinChars: 0,
+ MediaDescriptionMaxChars: 500,
+
+ StorageBackend: "local",
+ StorageBasePath: "/gotosocial/storage",
+ StorageServeProtocol: "https",
+ StorageServeHost: "localhost",
+ StorageServeBasePath: "/fileserver",
+
+ StatusesMaxChars: 5000,
+ StatusesCWMaxChars: 100,
+ StatusesPollMaxOptions: 6,
+ StatusesPollOptionMaxChars: 50,
+ StatusesMediaMaxFiles: 6,
+
+ LetsEncryptEnabled: true,
+ LetsEncryptPort: 80,
+ LetsEncryptCertDir: "/gotosocial/storage/certs",
+ LetsEncryptEmailAddress: "",
+
+ OIDCEnabled: false,
+ OIDCIdpName: "",
+ OIDCSkipVerification: false,
+ OIDCIssuer: "",
+ OIDCClientID: "",
+ OIDCClientSecret: "",
+ OIDCScopes: []string{oidc.ScopeOpenID, "profile", "email", "groups"},
+
+ SMTPHost: "",
+ SMTPPort: 0,
+ SMTPUsername: "",
+ SMTPPassword: "",
+ SMTPFrom: "GoToSocial",
+}
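Nothing in this hunk pushes these values into viper itself. As a purely illustrative alternative (the seedDefaults helper below is hypothetical and not part of the patch), the same struct could be registered as viper defaults so that unset flags, env vars, and file keys fall back to it:

package config

import "github.com/spf13/viper"

// seedDefaults is a hypothetical helper: it registers a few fields from
// Defaults as viper defaults. It is a sketch only and not part of the patch.
func seedDefaults() {
	viper.SetDefault(Keys.LogLevel, Defaults.LogLevel)
	viper.SetDefault(Keys.Protocol, Defaults.Protocol)
	viper.SetDefault(Keys.Port, Defaults.Port)
	viper.SetDefault(Keys.DbType, Defaults.DbType)
	viper.SetDefault(Keys.StatusesMaxChars, Defaults.StatusesMaxChars)
}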
diff --git a/internal/config/oidc.go b/internal/config/file.go
similarity index 60%
rename from internal/config/oidc.go
rename to internal/config/file.go
index 06158bbb7..62a3c0f6c 100644
--- a/internal/config/oidc.go
+++ b/internal/config/file.go
@@ -18,13 +18,21 @@
package config
-// OIDCConfig contains configuration values for openID connect (oauth) authorization by an external service such as Dex.
-type OIDCConfig struct {
- Enabled bool `yaml:"enabled"`
- IDPName string `yaml:"idpName"`
- SkipVerification bool `yaml:"skipVerification"`
- Issuer string `yaml:"issuer"`
- ClientID string `yaml:"clientID"`
- ClientSecret string `yaml:"clientSecret"`
- Scopes []string `yaml:"scopes"`
+import (
+ "github.com/spf13/viper"
+)
+
+// ReadFromFile checks if there's already a path to the config file set in viper.
+// If there is, it will attempt to read the config file into viper.
+func ReadFromFile() error {
+ // config file stuff
+ // check if we have a config path set (either by cli arg or env var)
+ if configPath := viper.GetString(Keys.ConfigPath); configPath != "" {
+ viper.SetConfigFile(configPath)
+ if err := viper.ReadInConfig(); err != nil {
+ return err
+ }
+ }
+
+ return nil
}
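ReadFromFile only does anything when a config path has already been placed in viper, normally via a config-path flag or the GTS_CONFIG_PATH environment variable. A minimal sketch of calling it, with the path value purely illustrative:

package main

import (
	"log"

	"github.com/spf13/viper"
	"github.com/superseriousbusiness/gotosocial/internal/config"
)

func main() {
	// In the real CLI the path arrives via flag/env binding; setting it
	// directly here is only for illustration.
	viper.Set(config.Keys.ConfigPath, "./example/config.yaml")

	if err := config.ReadFromFile(); err != nil {
		log.Fatalf("error reading config file: %s", err)
	}

	log.Printf("host from file: %s", viper.GetString(config.Keys.Host))
}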
diff --git a/internal/config/keys.go b/internal/config/keys.go
new file mode 100644
index 000000000..80ca36381
--- /dev/null
+++ b/internal/config/keys.go
@@ -0,0 +1,175 @@
+/*
+ GoToSocial
+ Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package config
+
+// KeyNames is a struct that just contains the names of configuration keys.
+type KeyNames struct {
+ // root
+ LogLevel string
+ ConfigPath string
+
+ // general
+ ApplicationName string
+ Host string
+ AccountDomain string
+ Protocol string
+ BindAddress string
+ Port string
+ TrustedProxies string
+ SoftwareVersion string
+
+ // database
+ DbType string
+ DbAddress string
+ DbPort string
+ DbUser string
+ DbPassword string
+ DbDatabase string
+ DbTLSMode string
+ DbTLSCACert string
+
+ // template
+ WebTemplateBaseDir string
+ WebAssetBaseDir string
+
+ // accounts
+ AccountsRegistrationOpen string
+ AccountsApprovalRequired string
+ AccountsReasonRequired string
+
+ // media
+ MediaImageMaxSize string
+ MediaVideoMaxSize string
+ MediaDescriptionMinChars string
+ MediaDescriptionMaxChars string
+
+ // storage
+ StorageBackend string
+ StorageBasePath string
+ StorageServeProtocol string
+ StorageServeHost string
+ StorageServeBasePath string
+
+ // statuses
+ StatusesMaxChars string
+ StatusesCWMaxChars string
+ StatusesPollMaxOptions string
+ StatusesPollOptionMaxChars string
+ StatusesMediaMaxFiles string
+
+ // letsencrypt
+ LetsEncryptEnabled string
+ LetsEncryptCertDir string
+ LetsEncryptEmailAddress string
+ LetsEncryptPort string
+
+ // oidc
+ OIDCEnabled string
+ OIDCIdpName string
+ OIDCSkipVerification string
+ OIDCIssuer string
+ OIDCClientID string
+ OIDCClientSecret string
+ OIDCScopes string
+
+ // smtp
+ SMTPHost string
+ SMTPPort string
+ SMTPUsername string
+ SMTPPassword string
+ SMTPFrom string
+
+ // admin
+ AdminAccountUsername string
+ AdminAccountEmail string
+ AdminAccountPassword string
+ AdminTransPath string
+}
+
+// Keys contains the names of the various keys used for initializing and storing flag variables,
+// and retrieving values from the viper config store.
+var Keys = KeyNames{
+ LogLevel: "log-level",
+ ApplicationName: "application-name",
+ ConfigPath: "config-path",
+ Host: "host",
+ AccountDomain: "account-domain",
+ Protocol: "protocol",
+ BindAddress: "bind-address",
+ Port: "port",
+ TrustedProxies: "trusted-proxies",
+ SoftwareVersion: "software-version",
+
+ DbType: "db-type",
+ DbAddress: "db-address",
+ DbPort: "db-port",
+ DbUser: "db-user",
+ DbPassword: "db-password",
+ DbDatabase: "db-database",
+ DbTLSMode: "db-tls-mode",
+ DbTLSCACert: "db-tls-ca-cert",
+
+ WebTemplateBaseDir: "web-template-base-dir",
+ WebAssetBaseDir: "web-asset-base-dir",
+
+ AccountsRegistrationOpen: "accounts-registration-open",
+ AccountsApprovalRequired: "accounts-approval-required",
+ AccountsReasonRequired: "accounts-reason-required",
+
+ MediaImageMaxSize: "media-image-max-size",
+ MediaVideoMaxSize: "media-video-max-size",
+ MediaDescriptionMinChars: "media-description-min-chars",
+ MediaDescriptionMaxChars: "media-description-max-chars",
+
+ StorageBackend: "storage-backend",
+ StorageBasePath: "storage-base-path",
+ StorageServeProtocol: "storage-serve-protocol",
+ StorageServeHost: "storage-serve-host",
+ StorageServeBasePath: "storage-serve-base-path",
+
+ StatusesMaxChars: "statuses-max-chars",
+ StatusesCWMaxChars: "statuses-cw-max-chars",
+ StatusesPollMaxOptions: "statuses-poll-max-options",
+ StatusesPollOptionMaxChars: "statuses-poll-option-max-chars",
+ StatusesMediaMaxFiles: "statuses-media-max-files",
+
+ LetsEncryptEnabled: "letsencrypt-enabled",
+ LetsEncryptPort: "letsencrypt-port",
+ LetsEncryptCertDir: "letsencrypt-cert-dir",
+ LetsEncryptEmailAddress: "letsencrypt-email-address",
+
+ OIDCEnabled: "oidc-enabled",
+ OIDCIdpName: "oidc-idp-name",
+ OIDCSkipVerification: "oidc-skip-verification",
+ OIDCIssuer: "oidc-issuer",
+ OIDCClientID: "oidc-client-id",
+ OIDCClientSecret: "oidc-client-secret",
+ OIDCScopes: "oidc-scopes",
+
+ SMTPHost: "smtp-host",
+ SMTPPort: "smtp-port",
+ SMTPUsername: "smtp-username",
+ SMTPPassword: "smtp-password",
+ SMTPFrom: "smtp-from",
+
+ AdminAccountUsername: "username",
+ AdminAccountEmail: "email",
+ AdminAccountPassword: "password",
+ AdminTransPath: "path",
+}
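Because every key name lives in this single struct, call sites use viper's typed getters against it instead of a shared Config struct. A small sketch (the package and function are illustrative; the values depend on whatever flags, env vars, or file supplied them):

package example

import (
	"github.com/spf13/viper"
	"github.com/superseriousbusiness/gotosocial/internal/config"
)

// serverSettings reads a few typed values back out of the viper store
// using the shared key names.
func serverSettings() (bind string, port int, proxies []string, openReg bool) {
	bind = viper.GetString(config.Keys.BindAddress)
	port = viper.GetInt(config.Keys.Port)
	proxies = viper.GetStringSlice(config.Keys.TrustedProxies)
	openReg = viper.GetBool(config.Keys.AccountsRegistrationOpen)
	return
}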
diff --git a/internal/config/letsencrypt.go b/internal/config/letsencrypt.go
deleted file mode 100644
index a71172635..000000000
--- a/internal/config/letsencrypt.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package config
-
-// LetsEncryptConfig wraps everything needed to manage letsencrypt certificates from within gotosocial.
-type LetsEncryptConfig struct {
- // Should letsencrypt certificate fetching be enabled?
- Enabled bool `yaml:"enabled"`
- // What port should the server listen for letsencrypt challenges on?
- Port int `yaml:"port"`
- // Where should certificates be stored?
- CertDir string `yaml:"certDir"`
- // Email address to pass to letsencrypt for notifications about certificate expiry etc.
- EmailAddress string `yaml:"emailAddress"`
-}
diff --git a/internal/config/media.go b/internal/config/media.go
deleted file mode 100644
index 136dba528..000000000
--- a/internal/config/media.go
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- GoToSocial
- Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-package config
-
-// MediaConfig contains configuration for receiving and parsing media files and attachments
-type MediaConfig struct {
- // Max size of uploaded images in bytes
- MaxImageSize int `yaml:"maxImageSize"`
- // Max size of uploaded video in bytes
- MaxVideoSize int `yaml:"maxVideoSize"`
- // Minimum amount of chars required in an image description
- MinDescriptionChars int `yaml:"minDescriptionChars"`
- // Max amount of chars allowed in an image description
- MaxDescriptionChars int `yaml:"maxDescriptionChars"`
-}
diff --git a/internal/config/smtp.go b/internal/config/smtp.go
deleted file mode 100644
index daa4967bf..000000000
--- a/internal/config/smtp.go
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- GoToSocial
- Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-package config
-
-// SMTPConfig holds configuration for sending emails using the smtp protocol.
-type SMTPConfig struct {
- // Host of the smtp server.
- Host string `yaml:"host"`
- // Port of the smtp server.
- Port int `yaml:"port"`
- // Username to use when authenticating with the smtp server.
- Username string `yaml:"username"`
- // Password to use when authenticating with the smtp server.
- Password string `yaml:"password"`
- // From address to use when sending emails.
- From string `yaml:"from"`
-}
diff --git a/internal/config/statuses.go b/internal/config/statuses.go
deleted file mode 100644
index fbb5225b4..000000000
--- a/internal/config/statuses.go
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- GoToSocial
- Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-package config
-
-// StatusesConfig pertains to posting/deleting/interacting with statuses
-type StatusesConfig struct {
- // Maximum amount of characters allowed in a status, excluding CW
- MaxChars int `yaml:"max_chars"`
- // Maximum amount of characters allowed in a content-warning/spoiler field
- CWMaxChars int `yaml:"cw_max_chars"`
- // Maximum number of options allowed in a poll
- PollMaxOptions int `yaml:"poll_max_options"`
- // Maximum characters allowed per poll option
- PollOptionMaxChars int `yaml:"poll_option_max_chars"`
- // Maximum amount of media files allowed to be attached to one status
- MaxMediaFiles int `yaml:"max_media_files"`
-}
diff --git a/internal/config/storage.go b/internal/config/storage.go
deleted file mode 100644
index 4a8ff79e4..000000000
--- a/internal/config/storage.go
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- GoToSocial
- Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-package config
-
-// StorageConfig contains configuration for storage and serving of media files and attachments
-type StorageConfig struct {
- // Type of storage backend to use: currently only 'local' is supported.
- // TODO: add S3 support here.
- Backend string `yaml:"backend"`
-
- // The base path for storing things. Should be an already-existing directory.
- BasePath string `yaml:"basePath"`
-
- // Protocol to use when *serving* media files from storage
- ServeProtocol string `yaml:"serveProtocol"`
- // Host to use when *serving* media files from storage
- ServeHost string `yaml:"serveHost"`
- // Base path to use when *serving* media files from storage
- ServeBasePath string `yaml:"serveBasePath"`
-}
diff --git a/internal/config/template.go b/internal/config/template.go
deleted file mode 100644
index 9c524471c..000000000
--- a/internal/config/template.go
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- GoToSocial
- Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-package config
-
-// TemplateConfig pertains to templating of web pages/email notifications and the like
-type TemplateConfig struct {
- // Directory from which gotosocial will attempt to load html templates (.tmpl files).
- BaseDir string `yaml:"baseDir"`
- // Directory from which static files are served
- AssetBaseDir string `yaml:"assetDir"`
-}
diff --git a/internal/config/values.go b/internal/config/values.go
new file mode 100644
index 000000000..387f934d8
--- /dev/null
+++ b/internal/config/values.go
@@ -0,0 +1,90 @@
+/*
+ GoToSocial
+ Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package config
+
+// Values contains the type of each configuration value.
+type Values struct {
+ LogLevel string
+ ApplicationName string
+ ConfigPath string
+ Host string
+ AccountDomain string
+ Protocol string
+ BindAddress string
+ Port int
+ TrustedProxies []string
+ SoftwareVersion string
+
+ DbType string
+ DbAddress string
+ DbPort int
+ DbUser string
+ DbPassword string
+ DbDatabase string
+ DbTLSMode string
+ DbTLSCACert string
+
+ WebTemplateBaseDir string
+ WebAssetBaseDir string
+
+ AccountsRegistrationOpen bool
+ AccountsApprovalRequired bool
+ AccountsReasonRequired bool
+
+ MediaImageMaxSize int
+ MediaVideoMaxSize int
+ MediaDescriptionMinChars int
+ MediaDescriptionMaxChars int
+
+ StorageBackend string
+ StorageBasePath string
+ StorageServeProtocol string
+ StorageServeHost string
+ StorageServeBasePath string
+
+ StatusesMaxChars int
+ StatusesCWMaxChars int
+ StatusesPollMaxOptions int
+ StatusesPollOptionMaxChars int
+ StatusesMediaMaxFiles int
+
+ LetsEncryptEnabled bool
+ LetsEncryptCertDir string
+ LetsEncryptEmailAddress string
+ LetsEncryptPort int
+
+ OIDCEnabled bool
+ OIDCIdpName string
+ OIDCSkipVerification bool
+ OIDCIssuer string
+ OIDCClientID string
+ OIDCClientSecret string
+ OIDCScopes []string
+
+ SMTPHost string
+ SMTPPort int
+ SMTPUsername string
+ SMTPPassword string
+ SMTPFrom string
+
+ AdminAccountUsername string
+ AdminAccountEmail string
+ AdminAccountPassword string
+ AdminTransPath string
+}
diff --git a/internal/config/accounts.go b/internal/config/viper.go
similarity index 60%
rename from internal/config/accounts.go
rename to internal/config/viper.go
index 3fc9e900e..f53ef78d0 100644
--- a/internal/config/accounts.go
+++ b/internal/config/viper.go
@@ -18,12 +18,25 @@
package config
-// AccountsConfig contains configuration to do with creating accounts, new registrations, and defaults.
-type AccountsConfig struct {
- // Do we want people to be able to just submit sign up requests, or do we want invite only?
- OpenRegistration bool `yaml:"openRegistration"`
- // Do sign up requests require approval from an admin/moderator?
- RequireApproval bool `yaml:"requireApproval"`
- // Do we require a reason for a sign up or is an empty string OK?
- ReasonRequired bool `yaml:"reasonRequired"`
+import (
+ "strings"
+
+ "github.com/spf13/pflag"
+ "github.com/spf13/viper"
+)
+
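+// InitViper binds GTS_-prefixed environment variables and all flags in the given flagset into the global viper store, so config values can later be fetched from viper by key.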
+func InitViper(f *pflag.FlagSet) error {
+ // environment variable stuff
+ // flag 'some-flag-name' becomes env var 'GTS_SOME_FLAG_NAME'
+ viper.SetEnvPrefix("gts")
+ viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
+ viper.AutomaticEnv()
+
+ // flag stuff
+ // bind all of the flags in flagset to viper so that we can retrieve their values from the viper store
+ if err := viper.BindPFlags(f); err != nil {
+ return err
+ }
+
+ return nil
}
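InitViper is what makes a flag like some-flag-name reachable both as GTS_SOME_FLAG_NAME and as a viper key. A standalone sketch of calling it (the flag set built here is illustrative; the real flag definitions live elsewhere in the patch):

package main

import (
	"log"
	"os"

	"github.com/spf13/pflag"
	"github.com/spf13/viper"
	"github.com/superseriousbusiness/gotosocial/internal/config"
)

func main() {
	// Build a small flag set; the real commands define many more flags.
	flags := pflag.NewFlagSet("example", pflag.ExitOnError)
	flags.String(config.Keys.Host, "", "hostname this instance is reachable at")

	if err := flags.Parse(os.Args[1:]); err != nil {
		log.Fatal(err)
	}

	// Bind env vars + flags into viper: --host and GTS_HOST both end up
	// under the "host" key.
	if err := config.InitViper(flags); err != nil {
		log.Fatalf("error initializing viper: %s", err)
	}

	log.Printf("host: %s", viper.GetString(config.Keys.Host))
}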
diff --git a/internal/db/bundb/account.go b/internal/db/bundb/account.go
index 9c9dcfc6a..ab6ec2b7c 100644
--- a/internal/db/bundb/account.go
+++ b/internal/db/bundb/account.go
@@ -24,6 +24,7 @@ import (
"fmt"
"time"
+ "github.com/spf13/viper"
"github.com/superseriousbusiness/gotosocial/internal/cache"
"github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
@@ -32,9 +33,8 @@ import (
)
type accountDB struct {
- config *config.Config
- conn *DBConn
- cache *cache.AccountCache
+ conn *DBConn
+ cache *cache.AccountCache
}
func (a *accountDB) newAccountQ(account *gtsmodel.Account) *bun.SelectQuery {
@@ -132,8 +132,9 @@ func (a *accountDB) GetInstanceAccount(ctx context.Context, domain string) (*gts
Where("account.username = ?", domain).
Where("account.domain = ?", domain)
} else {
+ host := viper.GetString(config.Keys.Host)
q = q.
- Where("account.username = ?", a.config.Host).
+ Where("account.username = ?", host).
WhereGroup(" AND ", whereEmptyOrNull("domain"))
}
diff --git a/internal/db/bundb/admin.go b/internal/db/bundb/admin.go
index accdf7e53..4b05fdd56 100644
--- a/internal/db/bundb/admin.go
+++ b/internal/db/bundb/admin.go
@@ -30,6 +30,7 @@ import (
"time"
"github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
"github.com/superseriousbusiness/gotosocial/internal/ap"
"github.com/superseriousbusiness/gotosocial/internal/config"
@@ -41,8 +42,7 @@ import (
)
type adminDB struct {
- config *config.Config
- conn *DBConn
+ conn *DBConn
}
func (a *adminDB) IsUsernameAvailable(ctx context.Context, username string) (bool, db.Error) {
@@ -101,7 +101,7 @@ func (a *adminDB) NewSignup(ctx context.Context, username string, reason string,
Scan(ctx)
if err != nil {
// we just don't have an account yet create one
- newAccountURIs := util.GenerateURIsForAccount(username, a.config.Protocol, a.config.Host)
+ newAccountURIs := util.GenerateURIsForAccount(username)
newAccountID, err := id.NewRandomULID()
if err != nil {
return nil, err
@@ -176,7 +176,7 @@ func (a *adminDB) NewSignup(ctx context.Context, username string, reason string,
}
func (a *adminDB) CreateInstanceAccount(ctx context.Context) db.Error {
- username := a.config.Host
+ username := viper.GetString(config.Keys.Host)
q := a.conn.
NewSelect().
@@ -204,10 +204,10 @@ func (a *adminDB) CreateInstanceAccount(ctx context.Context) db.Error {
return err
}
- newAccountURIs := util.GenerateURIsForAccount(username, a.config.Protocol, a.config.Host)
+ newAccountURIs := util.GenerateURIsForAccount(username)
acct := &gtsmodel.Account{
ID: aID,
- Username: a.config.Host,
+ Username: username,
DisplayName: username,
URL: newAccountURIs.UserURL,
PrivateKey: key,
@@ -235,13 +235,14 @@ func (a *adminDB) CreateInstanceAccount(ctx context.Context) db.Error {
}
func (a *adminDB) CreateInstanceInstance(ctx context.Context) db.Error {
- domain := a.config.Host
+ protocol := viper.GetString(config.Keys.Protocol)
+ host := viper.GetString(config.Keys.Host)
// check if instance entry already exists
q := a.conn.
NewSelect().
Model(&gtsmodel.Instance{}).
- Where("domain = ?", domain)
+ Where("domain = ?", host)
exists, err := a.conn.Exists(ctx, q)
if err != nil {
@@ -259,9 +260,9 @@ func (a *adminDB) CreateInstanceInstance(ctx context.Context) db.Error {
i := &gtsmodel.Instance{
ID: iID,
- Domain: domain,
- Title: domain,
- URI: fmt.Sprintf("%s://%s", a.config.Protocol, a.config.Host),
+ Domain: host,
+ Title: host,
+ URI: fmt.Sprintf("%s://%s", protocol, host),
}
insertQ := a.conn.
@@ -273,6 +274,6 @@ func (a *adminDB) CreateInstanceInstance(ctx context.Context) db.Error {
return a.conn.ProcessError(err)
}
- logrus.Infof("created instance instance %s with id %s", domain, i.ID)
+ logrus.Infof("created instance instance %s with id %s", host, i.ID)
return nil
}
diff --git a/internal/db/bundb/basic.go b/internal/db/bundb/basic.go
index 2ff8dcb9b..a4cff4202 100644
--- a/internal/db/bundb/basic.go
+++ b/internal/db/bundb/basic.go
@@ -24,15 +24,13 @@ import (
"github.com/sirupsen/logrus"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/uptrace/bun"
)
type basicDB struct {
- config *config.Config
- conn *DBConn
+ conn *DBConn
}
func (b *basicDB) Put(ctx context.Context, i interface{}) db.Error {
diff --git a/internal/db/bundb/bundb.go b/internal/db/bundb/bundb.go
index 4634f1981..bb159fad8 100644
--- a/internal/db/bundb/bundb.go
+++ b/internal/db/bundb/bundb.go
@@ -35,6 +35,7 @@ import (
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v4/stdlib"
"github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
"github.com/superseriousbusiness/gotosocial/internal/cache"
"github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
@@ -52,6 +53,17 @@ import (
const (
dbTypePostgres = "postgres"
dbTypeSqlite = "sqlite"
+
+ // dbTLSModeDisable does not attempt to make a TLS connection to the database.
+ dbTLSModeDisable = "disable"
+ // dbTLSModeEnable attempts to make a TLS connection to the database, but doesn't fail if
+ // the certificate passed by the database isn't verified.
+ dbTLSModeEnable = "enable"
+ // dbTLSModeRequire attempts to make a TLS connection to the database, and requires
+ // that the certificate presented by the database is valid.
+ dbTLSModeRequire = "require"
+ // dbTLSModeUnset means that the TLS mode has not been set.
+ dbTLSModeUnset = ""
)
var registerTables = []interface{}{
@@ -73,8 +85,7 @@ type bunDBService struct {
db.Session
db.Status
db.Timeline
- config *config.Config
- conn *DBConn
+ conn *DBConn
}
func doMigration(ctx context.Context, db *bun.DB) error {
@@ -105,22 +116,24 @@ func doMigration(ctx context.Context, db *bun.DB) error {
// NewBunDBService returns a bunDB derived from the provided config, which implements the go-fed DB interface.
// Under the hood, it uses https://github.com/uptrace/bun to create and maintain a database connection.
-func NewBunDBService(ctx context.Context, c *config.Config) (db.DB, error) {
+func NewBunDBService(ctx context.Context) (db.DB, error) {
var conn *DBConn
var err error
- switch strings.ToLower(c.DBConfig.Type) {
+ dbType := strings.ToLower(viper.GetString(config.Keys.DbType))
+
+ switch dbType {
case dbTypePostgres:
- conn, err = pgConn(ctx, c)
+ conn, err = pgConn(ctx)
if err != nil {
return nil, err
}
case dbTypeSqlite:
- conn, err = sqliteConn(ctx, c)
+ conn, err = sqliteConn(ctx)
if err != nil {
return nil, err
}
default:
- return nil, fmt.Errorf("database type %s not supported for bundb", strings.ToLower(c.DBConfig.Type))
+ return nil, fmt.Errorf("database type %s not supported for bundb", dbType)
}
// add a hook to just log queries and the time they take
@@ -142,76 +155,66 @@ func NewBunDBService(ctx context.Context, c *config.Config) (db.DB, error) {
return nil, fmt.Errorf("db migration error: %s", err)
}
- accounts := &accountDB{config: c, conn: conn, cache: cache.NewAccountCache()}
+ accounts := &accountDB{conn: conn, cache: cache.NewAccountCache()}
ps := &bunDBService{
Account: accounts,
Admin: &adminDB{
- config: c,
- conn: conn,
+ conn: conn,
},
Basic: &basicDB{
- config: c,
- conn: conn,
+ conn: conn,
},
Domain: &domainDB{
- config: c,
- conn: conn,
+ conn: conn,
},
Instance: &instanceDB{
- config: c,
- conn: conn,
+ conn: conn,
},
Media: &mediaDB{
- config: c,
- conn: conn,
+ conn: conn,
},
Mention: &mentionDB{
- config: c,
- conn: conn,
- cache: ttlcache.NewCache(),
+ conn: conn,
+ cache: ttlcache.NewCache(),
},
Notification: &notificationDB{
- config: c,
- conn: conn,
- cache: ttlcache.NewCache(),
+ conn: conn,
+ cache: ttlcache.NewCache(),
},
Relationship: &relationshipDB{
- config: c,
- conn: conn,
+ conn: conn,
},
Session: &sessionDB{
- config: c,
- conn: conn,
+ conn: conn,
},
Status: &statusDB{
- config: c,
conn: conn,
cache: cache.NewStatusCache(),
accounts: accounts,
},
Timeline: &timelineDB{
- config: c,
- conn: conn,
+ conn: conn,
},
- config: c,
- conn: conn,
+ conn: conn,
}
// we can confidently return this useable service now
return ps, nil
}
-func sqliteConn(ctx context.Context, c *config.Config) (*DBConn, error) {
+func sqliteConn(ctx context.Context) (*DBConn, error) {
+ dbAddress := viper.GetString(config.Keys.DbAddress)
+
// Drop anything fancy from DB address
- c.DBConfig.Address = strings.Split(c.DBConfig.Address, "?")[0]
- c.DBConfig.Address = strings.TrimPrefix(c.DBConfig.Address, "file:")
+ dbAddress = strings.Split(dbAddress, "?")[0]
+ dbAddress = strings.TrimPrefix(dbAddress, "file:")
// Append our own SQLite preferences
- c.DBConfig.Address = "file:" + c.DBConfig.Address + "?cache=shared"
+ dbAddress = "file:" + dbAddress + "?cache=shared"
// Open new DB instance
- sqldb, err := sql.Open("sqlite", c.DBConfig.Address)
+ sqldb, err := sql.Open("sqlite", dbAddress)
if err != nil {
if errWithCode, ok := err.(*sqlite.Error); ok {
err = errors.New(sqlite.ErrorCodeString[errWithCode.Code()])
@@ -221,7 +224,7 @@ func sqliteConn(ctx context.Context, c *config.Config) (*DBConn, error) {
tweakConnectionValues(sqldb)
- if c.DBConfig.Address == "file::memory:?cache=shared" {
+ if dbAddress == "file::memory:?cache=shared" {
logrus.Warn("sqlite in-memory database should only be used for debugging")
// don't close connections on disconnect -- otherwise
// the SQLite database will be deleted when there
@@ -243,8 +246,8 @@ func sqliteConn(ctx context.Context, c *config.Config) (*DBConn, error) {
return conn, nil
}
-func pgConn(ctx context.Context, c *config.Config) (*DBConn, error) {
- opts, err := deriveBunDBPGOptions(c)
+func pgConn(ctx context.Context) (*DBConn, error) {
+ opts, err := deriveBunDBPGOptions()
if err != nil {
return nil, fmt.Errorf("could not create bundb postgres options: %s", err)
}
@@ -270,54 +273,63 @@ func pgConn(ctx context.Context, c *config.Config) (*DBConn, error) {
// deriveBunDBPGOptions takes an application config and returns either a ready-to-use set of options
// with sensible defaults, or an error if it's not satisfied by the provided config.
-func deriveBunDBPGOptions(c *config.Config) (*pgx.ConnConfig, error) {
- if strings.ToUpper(c.DBConfig.Type) != db.DBTypePostgres {
- return nil, fmt.Errorf("expected db type of %s but got %s", db.DBTypePostgres, c.DBConfig.Type)
+func deriveBunDBPGOptions() (*pgx.ConnConfig, error) {
+ keys := config.Keys
+
+ if strings.ToUpper(viper.GetString(keys.DbType)) != db.DBTypePostgres {
+ return nil, fmt.Errorf("expected db type of %s but got %s", db.DBTypePostgres, viper.GetString(keys.DbType))
}
// validate port
- if c.DBConfig.Port == 0 {
+ port := viper.GetInt(keys.DbPort)
+ if port == 0 {
return nil, errors.New("no port set")
}
// validate address
- if c.DBConfig.Address == "" {
+ address := viper.GetString(keys.DbAddress)
+ if address == "" {
return nil, errors.New("no address set")
}
// validate username
- if c.DBConfig.User == "" {
+ username := viper.GetString(keys.DbUser)
+ if username == "" {
return nil, errors.New("no user set")
}
// validate that there's a password
- if c.DBConfig.Password == "" {
+ password := viper.GetString(keys.DbPassword)
+ if password == "" {
return nil, errors.New("no password set")
}
// validate database
- if c.DBConfig.Database == "" {
+ database := viper.GetString(keys.DbDatabase)
+ if database == "" {
return nil, errors.New("no database set")
}
var tlsConfig *tls.Config
- switch c.DBConfig.TLSMode {
- case config.DBTLSModeDisable, config.DBTLSModeUnset:
+ tlsMode := viper.GetString(keys.DbTLSMode)
+ switch tlsMode {
+ case dbTLSModeDisable, dbTLSModeUnset:
break // nothing to do
- case config.DBTLSModeEnable:
+ case dbTLSModeEnable:
/* #nosec G402 */
tlsConfig = &tls.Config{
InsecureSkipVerify: true,
}
- case config.DBTLSModeRequire:
+ case dbTLSModeRequire:
tlsConfig = &tls.Config{
InsecureSkipVerify: false,
- ServerName: c.DBConfig.Address,
+ ServerName: viper.GetString(keys.DbAddress),
MinVersion: tls.VersionTLS12,
}
}
- if tlsConfig != nil && c.DBConfig.TLSCACert != "" {
+ caCertPath := viper.GetString(keys.DbTLSCACert)
+ if tlsConfig != nil && caCertPath != "" {
// load the system cert pool first -- we'll append the given CA cert to this
certPool, err := x509.SystemCertPool()
if err != nil {
@@ -325,24 +337,24 @@ func deriveBunDBPGOptions(c *config.Config) (*pgx.ConnConfig, error) {
}
// open the file itself and make sure there's something in it
- caCertBytes, err := os.ReadFile(c.DBConfig.TLSCACert)
+ caCertBytes, err := os.ReadFile(caCertPath)
if err != nil {
- return nil, fmt.Errorf("error opening CA certificate at %s: %s", c.DBConfig.TLSCACert, err)
+ return nil, fmt.Errorf("error opening CA certificate at %s: %s", caCertPath, err)
}
if len(caCertBytes) == 0 {
- return nil, fmt.Errorf("ca cert at %s was empty", c.DBConfig.TLSCACert)
+ return nil, fmt.Errorf("ca cert at %s was empty", caCertPath)
}
// make sure we have a PEM block
caPem, _ := pem.Decode(caCertBytes)
if caPem == nil {
- return nil, fmt.Errorf("could not parse cert at %s into PEM", c.DBConfig.TLSCACert)
+ return nil, fmt.Errorf("could not parse cert at %s into PEM", caCertPath)
}
// parse the PEM block into the certificate
caCert, err := x509.ParseCertificate(caPem.Bytes)
if err != nil {
- return nil, fmt.Errorf("could not parse cert at %s into x509 certificate: %s", c.DBConfig.TLSCACert, err)
+ return nil, fmt.Errorf("could not parse cert at %s into x509 certificate: %s", caCertPath, err)
}
// we're happy, add it to the existing pool and then use this pool in our tls config
@@ -351,14 +363,14 @@ func deriveBunDBPGOptions(c *config.Config) (*pgx.ConnConfig, error) {
}
cfg, _ := pgx.ParseConfig("")
- cfg.Host = c.DBConfig.Address
- cfg.Port = uint16(c.DBConfig.Port)
- cfg.User = c.DBConfig.User
- cfg.Password = c.DBConfig.Password
+ cfg.Host = address
+ cfg.Port = uint16(port)
+ cfg.User = username
+ cfg.Password = password
cfg.TLSConfig = tlsConfig
- cfg.Database = c.DBConfig.Database
+ cfg.Database = database
cfg.PreferSimpleProtocol = true
- cfg.RuntimeParams["application_name"] = c.ApplicationName
+ cfg.RuntimeParams["application_name"] = viper.GetString(keys.ApplicationName)
return cfg, nil
}
@@ -455,6 +467,9 @@ func (ps *bunDBService) MentionStringsToMentions(ctx context.Context, targetAcco
}
func (ps *bunDBService) TagStringsToTags(ctx context.Context, tags []string, originAccountID string) ([]*gtsmodel.Tag, error) {
+ protocol := viper.GetString(config.Keys.Protocol)
+ host := viper.GetString(config.Keys.Host)
+
newTags := []*gtsmodel.Tag{}
for _, t := range tags {
tag := &gtsmodel.Tag{}
@@ -468,7 +483,7 @@ func (ps *bunDBService) TagStringsToTags(ctx context.Context, tags []string, ori
return nil, err
}
tag.ID = newID
- tag.URL = fmt.Sprintf("%s://%s/tags/%s", ps.config.Protocol, ps.config.Host, t)
+ tag.URL = fmt.Sprintf("%s://%s/tags/%s", protocol, host, t)
tag.Name = t
tag.FirstSeenFromAccountID = originAccountID
tag.CreatedAt = time.Now()
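deriveBunDBPGOptions now resolves every connection parameter from viper instead of c.DBConfig. Below is a condensed sketch of just the TLS-mode branch, using the dbTLSMode* constants defined in this file and only keys that appear in the hunk; it is illustrative, not a drop-in replacement for the full function:

    package example

    import (
        "crypto/tls"

        "github.com/spf13/viper"

        "github.com/superseriousbusiness/gotosocial/internal/config"
    )

    // pgTLSConfig derives a *tls.Config from the configured database TLS mode,
    // mirroring the switch in deriveBunDBPGOptions above.
    func pgTLSConfig() *tls.Config {
        switch viper.GetString(config.Keys.DbTLSMode) {
        case "disable", "": // dbTLSModeDisable, dbTLSModeUnset
            return nil // plain connection, no TLS
        case "enable": // dbTLSModeEnable: TLS, but don't verify the server certificate
            /* #nosec G402 */
            return &tls.Config{InsecureSkipVerify: true}
        case "require": // dbTLSModeRequire: TLS with certificate verification
            return &tls.Config{
                InsecureSkipVerify: false,
                ServerName:         viper.GetString(config.Keys.DbAddress),
                MinVersion:         tls.VersionTLS12,
            }
        default:
            return nil
        }
    }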
diff --git a/internal/db/bundb/bundb_test.go b/internal/db/bundb/bundb_test.go
index d4655a253..70fd58422 100644
--- a/internal/db/bundb/bundb_test.go
+++ b/internal/db/bundb/bundb_test.go
@@ -20,7 +20,6 @@ package bundb_test
import (
"github.com/stretchr/testify/suite"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/testrig"
@@ -29,8 +28,7 @@ import (
type BunDBStandardTestSuite struct {
// standard suite interfaces
suite.Suite
- config *config.Config
- db db.DB
+ db db.DB
// standard suite models
testTokens map[string]*gtsmodel.Token
@@ -57,10 +55,10 @@ func (suite *BunDBStandardTestSuite) SetupSuite() {
}
func (suite *BunDBStandardTestSuite) SetupTest() {
- suite.config = testrig.NewTestConfig()
- suite.db = testrig.NewTestDB()
testrig.InitTestLog()
+ testrig.InitTestConfig()
+ suite.db = testrig.NewTestDB()
testrig.StandardDBSetup(suite.db, suite.testAccounts)
}
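Because NewBunDBService reads viper at construction time, the suite has to initialize config before building the DB, which is why testrig.InitTestConfig now runs ahead of testrig.NewTestDB. As a hypothetical usage sketch (not in the patch), a test that needs a non-default value can override a single key after loading the test defaults; viper.Set takes precedence over values loaded from file or environment:

    package example

    import (
        "github.com/spf13/viper"

        "github.com/superseriousbusiness/gotosocial/internal/config"
        "github.com/superseriousbusiness/gotosocial/testrig"
    )

    // overrideHost loads the test defaults and then tweaks one key in place.
    func overrideHost() {
        testrig.InitTestConfig()
        viper.Set(config.Keys.Host, "testing.example.org") // hypothetical value
    }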
diff --git a/internal/db/bundb/domain.go b/internal/db/bundb/domain.go
index 5cb98e87e..9e6aa968d 100644
--- a/internal/db/bundb/domain.go
+++ b/internal/db/bundb/domain.go
@@ -22,15 +22,13 @@ import (
"context"
"net/url"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/util"
)
type domainDB struct {
- config *config.Config
- conn *DBConn
+ conn *DBConn
}
func (d *domainDB) IsDomainBlocked(ctx context.Context, domain string) (bool, db.Error) {
diff --git a/internal/db/bundb/instance.go b/internal/db/bundb/instance.go
index 006a76e24..4c47a96f4 100644
--- a/internal/db/bundb/instance.go
+++ b/internal/db/bundb/instance.go
@@ -20,7 +20,9 @@ package bundb
import (
"context"
+
"github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
"github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
@@ -29,8 +31,7 @@ import (
)
type instanceDB struct {
- config *config.Config
- conn *DBConn
+ conn *DBConn
}
func (i *instanceDB) CountInstanceUsers(ctx context.Context, domain string) (int, db.Error) {
@@ -40,7 +41,8 @@ func (i *instanceDB) CountInstanceUsers(ctx context.Context, domain string) (int
Where("username != ?", domain).
Where("? IS NULL", bun.Ident("suspended_at"))
- if domain == i.config.Host {
+ host := viper.GetString(config.Keys.Host)
+ if domain == host {
// if the domain is *this* domain, just count where the domain field is null
q = q.WhereGroup(" AND ", whereEmptyOrNull("domain"))
} else {
@@ -59,7 +61,8 @@ func (i *instanceDB) CountInstanceStatuses(ctx context.Context, domain string) (
NewSelect().
Model(&[]*gtsmodel.Status{})
- if domain == i.config.Host {
+ host := viper.GetString(config.Keys.Host)
+ if domain == host {
// if the domain is *this* domain, just count where local is true
q = q.Where("local = ?", true)
} else {
@@ -80,7 +83,8 @@ func (i *instanceDB) CountInstanceDomains(ctx context.Context, domain string) (i
NewSelect().
Model(&[]*gtsmodel.Instance{})
- if domain == i.config.Host {
+ host := viper.GetString(config.Keys.Host)
+ if domain == host {
// if the domain is *this* domain, just count other instances it knows about
// exclude domains that are blocked
q = q.
diff --git a/internal/db/bundb/media.go b/internal/db/bundb/media.go
index 1a06a0260..5ea995a2d 100644
--- a/internal/db/bundb/media.go
+++ b/internal/db/bundb/media.go
@@ -21,15 +21,13 @@ package bundb
import (
"context"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/uptrace/bun"
)
type mediaDB struct {
- config *config.Config
- conn *DBConn
+ conn *DBConn
}
func (m *mediaDB) newMediaQ(i interface{}) *bun.SelectQuery {
diff --git a/internal/db/bundb/mention.go b/internal/db/bundb/mention.go
index 81ad216cc..f38b02a22 100644
--- a/internal/db/bundb/mention.go
+++ b/internal/db/bundb/mention.go
@@ -22,16 +22,14 @@ import (
"context"
"github.com/ReneKroon/ttlcache"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/uptrace/bun"
)
type mentionDB struct {
- config *config.Config
- conn *DBConn
- cache *ttlcache.Cache
+ conn *DBConn
+ cache *ttlcache.Cache
}
func (m *mentionDB) newMentionQ(i interface{}) *bun.SelectQuery {
diff --git a/internal/db/bundb/notification.go b/internal/db/bundb/notification.go
index 212c97467..e9203c030 100644
--- a/internal/db/bundb/notification.go
+++ b/internal/db/bundb/notification.go
@@ -22,16 +22,14 @@ import (
"context"
"github.com/ReneKroon/ttlcache"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/uptrace/bun"
)
type notificationDB struct {
- config *config.Config
- conn *DBConn
- cache *ttlcache.Cache
+ conn *DBConn
+ cache *ttlcache.Cache
}
func (n *notificationDB) newNotificationQ(i interface{}) *bun.SelectQuery {
diff --git a/internal/db/bundb/relationship.go b/internal/db/bundb/relationship.go
index a98a9f426..f8d1bccbe 100644
--- a/internal/db/bundb/relationship.go
+++ b/internal/db/bundb/relationship.go
@@ -23,15 +23,13 @@ import (
"database/sql"
"fmt"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/uptrace/bun"
)
type relationshipDB struct {
- config *config.Config
- conn *DBConn
+ conn *DBConn
}
func (r *relationshipDB) newBlockQ(block *gtsmodel.Block) *bun.SelectQuery {
diff --git a/internal/db/bundb/session.go b/internal/db/bundb/session.go
index acc70a868..60e582abd 100644
--- a/internal/db/bundb/session.go
+++ b/internal/db/bundb/session.go
@@ -23,15 +23,13 @@ import (
"crypto/rand"
"errors"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/id"
)
type sessionDB struct {
- config *config.Config
- conn *DBConn
+ conn *DBConn
}
func (s *sessionDB) GetSession(ctx context.Context) (*gtsmodel.RouterSession, db.Error) {
diff --git a/internal/db/bundb/status.go b/internal/db/bundb/status.go
index 73e2cb4c0..8c157e77a 100644
--- a/internal/db/bundb/status.go
+++ b/internal/db/bundb/status.go
@@ -25,16 +25,14 @@ import (
"github.com/sirupsen/logrus"
"github.com/superseriousbusiness/gotosocial/internal/cache"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/uptrace/bun"
)
type statusDB struct {
- config *config.Config
- conn *DBConn
- cache *cache.StatusCache
+ conn *DBConn
+ cache *cache.StatusCache
// TODO: keep method definitions in same place but instead have receiver
// all point to one single "db" type, so they can all share methods
diff --git a/internal/db/bundb/timeline.go b/internal/db/bundb/timeline.go
index fb17fc53c..bb81d56cb 100644
--- a/internal/db/bundb/timeline.go
+++ b/internal/db/bundb/timeline.go
@@ -23,15 +23,13 @@ import (
"database/sql"
"sort"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/uptrace/bun"
)
type timelineDB struct {
- config *config.Config
- conn *DBConn
+ conn *DBConn
}
func (t *timelineDB) GetHomeTimeline(ctx context.Context, accountID string, maxID string, sinceID string, minID string, limit int, local bool) ([]*gtsmodel.Status, db.Error) {
diff --git a/internal/email/noopsender.go b/internal/email/noopsender.go
index 82eb8db44..d2c9bbb1b 100644
--- a/internal/email/noopsender.go
+++ b/internal/email/noopsender.go
@@ -23,13 +23,17 @@ import (
"html/template"
"github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
)
// NewNoopSender returns a no-op email sender that will just execute the given sendCallback
// every time it would otherwise send an email to the given toAddress with the given message value.
//
// Passing a nil function is also acceptable, in which case the send functions will just return nil.
-func NewNoopSender(templateBaseDir string, sendCallback func(toAddress string, message string)) (Sender, error) {
+func NewNoopSender(sendCallback func(toAddress string, message string)) (Sender, error) {
+ templateBaseDir := viper.GetString(config.Keys.WebTemplateBaseDir)
+
t, err := loadTemplates(templateBaseDir)
if err != nil {
return nil, err
diff --git a/internal/email/sender.go b/internal/email/sender.go
index a6a9abe55..546a5e9ce 100644
--- a/internal/email/sender.go
+++ b/internal/email/sender.go
@@ -23,6 +23,7 @@ import (
"html/template"
"net/smtp"
+ "github.com/spf13/viper"
"github.com/superseriousbusiness/gotosocial/internal/config"
)
@@ -36,18 +37,25 @@ type Sender interface {
}
// NewSender returns a new email Sender interface with the given configuration, or an error if something goes wrong.
-func NewSender(cfg *config.Config) (Sender, error) {
- t, err := loadTemplates(cfg.TemplateConfig.BaseDir)
+func NewSender() (Sender, error) {
+ keys := config.Keys
+
+ templateBaseDir := viper.GetString(keys.WebTemplateBaseDir)
+ t, err := loadTemplates(templateBaseDir)
if err != nil {
return nil, err
}
- auth := smtp.PlainAuth("", cfg.SMTPConfig.Username, cfg.SMTPConfig.Password, cfg.SMTPConfig.Host)
+ username := viper.GetString(keys.SMTPUsername)
+ password := viper.GetString(keys.SMTPPassword)
+ host := viper.GetString(keys.SMTPHost)
+ port := viper.GetInt(keys.SMTPPort)
+ from := viper.GetString(keys.SMTPFrom)
return &sender{
- hostAddress: fmt.Sprintf("%s:%d", cfg.SMTPConfig.Host, cfg.SMTPConfig.Port),
- from: cfg.SMTPConfig.From,
- auth: auth,
+ hostAddress: fmt.Sprintf("%s:%d", host, port),
+ from: from,
+ auth: smtp.PlainAuth("", username, password, host),
template: t,
}, nil
}
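NewSender no longer takes a *config.Config; it pulls the SMTP and template settings straight from viper. A minimal usage sketch follows (the key assignments and values are illustrative only; in the server they are set by the CLI/config loading, not by hand):

    package example

    import (
        "github.com/spf13/viper"

        "github.com/superseriousbusiness/gotosocial/internal/config"
        "github.com/superseriousbusiness/gotosocial/internal/email"
    )

    func newSenderSketch() (email.Sender, error) {
        // Illustrative values; normally these come from the config file or flags.
        viper.Set(config.Keys.SMTPHost, "smtp.example.org")
        viper.Set(config.Keys.SMTPPort, 587)
        viper.Set(config.Keys.SMTPUsername, "postmaster@example.org")
        viper.Set(config.Keys.SMTPPassword, "some-password")
        viper.Set(config.Keys.SMTPFrom, "gotosocial@example.org")
        viper.Set(config.Keys.WebTemplateBaseDir, "./web/template/")

        return email.NewSender()
    }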
diff --git a/internal/federation/authenticate.go b/internal/federation/authenticate.go
index fea5a765a..9d715c549 100644
--- a/internal/federation/authenticate.go
+++ b/internal/federation/authenticate.go
@@ -29,11 +29,13 @@ import (
"strings"
"github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
"github.com/go-fed/httpsig"
"github.com/superseriousbusiness/activity/pub"
"github.com/superseriousbusiness/activity/streams"
"github.com/superseriousbusiness/activity/streams/vocab"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/util"
@@ -155,7 +157,8 @@ func (f *federator) AuthenticateFederatedRequest(ctx context.Context, requestedU
requestingRemoteAccount := &gtsmodel.Account{}
requestingLocalAccount := &gtsmodel.Account{}
requestingHost := requestingPublicKeyID.Host
- if strings.EqualFold(requestingHost, f.config.Host) {
+ host := viper.GetString(config.Keys.Host)
+ if strings.EqualFold(requestingHost, host) {
// LOCAL ACCOUNT REQUEST
// the request is coming from INSIDE THE HOUSE so skip the remote dereferencing
l.Tracef("proceeding without dereference for local public key %s", requestingPublicKeyID)
diff --git a/internal/federation/dereferencing/dereferencer.go b/internal/federation/dereferencing/dereferencer.go
index 76fd830a2..c105665f3 100644
--- a/internal/federation/dereferencing/dereferencer.go
+++ b/internal/federation/dereferencing/dereferencer.go
@@ -24,7 +24,6 @@ import (
"sync"
"github.com/superseriousbusiness/gotosocial/internal/ap"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
@@ -82,19 +81,17 @@ type deref struct {
typeConverter typeutils.TypeConverter
transportController transport.Controller
mediaHandler media.Handler
- config *config.Config
handshakes map[string][]*url.URL
handshakeSync *sync.Mutex // mutex to lock/unlock when checking or updating the handshakes map
}
// NewDereferencer returns a Dereferencer initialized with the given parameters.
-func NewDereferencer(config *config.Config, db db.DB, typeConverter typeutils.TypeConverter, transportController transport.Controller, mediaHandler media.Handler) Dereferencer {
+func NewDereferencer(db db.DB, typeConverter typeutils.TypeConverter, transportController transport.Controller, mediaHandler media.Handler) Dereferencer {
return &deref{
db: db,
typeConverter: typeConverter,
transportController: transportController,
mediaHandler: mediaHandler,
- config: config,
handshakeSync: &sync.Mutex{},
}
}
diff --git a/internal/federation/dereferencing/dereferencer_test.go b/internal/federation/dereferencing/dereferencer_test.go
index d4bf3396d..8bcee8615 100644
--- a/internal/federation/dereferencing/dereferencer_test.go
+++ b/internal/federation/dereferencing/dereferencer_test.go
@@ -29,7 +29,6 @@ import (
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/activity/streams"
"github.com/superseriousbusiness/activity/streams/vocab"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/federation/dereferencing"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
@@ -39,7 +38,6 @@ import (
type DereferencerStandardTestSuite struct {
suite.Suite
- config *config.Config
db db.DB
storage *kv.KVStore
@@ -61,11 +59,12 @@ func (suite *DereferencerStandardTestSuite) SetupSuite() {
}
func (suite *DereferencerStandardTestSuite) SetupTest() {
- suite.config = testrig.NewTestConfig()
- suite.db = testrig.NewTestDB()
testrig.InitTestLog()
+ testrig.InitTestConfig()
+
+ suite.db = testrig.NewTestDB()
suite.storage = testrig.NewTestStorage()
- suite.dereferencer = dereferencing.NewDereferencer(suite.config, suite.db, testrig.NewTestTypeConverter(suite.db), suite.mockTransportController(), testrig.NewTestMediaHandler(suite.db, suite.storage))
+ suite.dereferencer = dereferencing.NewDereferencer(suite.db, testrig.NewTestTypeConverter(suite.db), suite.mockTransportController(), testrig.NewTestMediaHandler(suite.db, suite.storage))
testrig.StandardDBSetup(suite.db, nil)
}
diff --git a/internal/federation/dereferencing/thread.go b/internal/federation/dereferencing/thread.go
index db1d336c6..209df32c4 100644
--- a/internal/federation/dereferencing/thread.go
+++ b/internal/federation/dereferencing/thread.go
@@ -24,7 +24,9 @@ import (
"net/url"
"github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
"github.com/superseriousbusiness/gotosocial/internal/ap"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/util"
)
@@ -43,7 +45,8 @@ func (d *deref) DereferenceThread(ctx context.Context, username string, statusIR
l.Debug("entering DereferenceThread")
// if it's our status we already have everything stashed so we can bail early
- if statusIRI.Host == d.config.Host {
+ host := viper.GetString(config.Keys.Host)
+ if statusIRI.Host == host {
l.Debug("iri belongs to us, bailing")
return nil
}
@@ -77,7 +80,8 @@ func (d *deref) iterateAncestors(ctx context.Context, username string, statusIRI
l.Debug("entering iterateAncestors")
// if it's our status we don't need to dereference anything so we can immediately move up the chain
- if statusIRI.Host == d.config.Host {
+ host := viper.GetString(config.Keys.Host)
+ if statusIRI.Host == host {
l.Debug("iri belongs to us, moving up to next ancestor")
// since this is our status, we know we can extract the id from the status path
@@ -129,7 +133,8 @@ func (d *deref) iterateDescendants(ctx context.Context, username string, statusI
l.Debug("entering iterateDescendants")
// if it's our status we already have descendants stashed so we can bail early
- if statusIRI.Host == d.config.Host {
+ host := viper.GetString(config.Keys.Host)
+ if statusIRI.Host == host {
l.Debug("iri belongs to us, bailing")
return nil
}
@@ -205,7 +210,8 @@ pageLoop:
continue
}
- if itemURI.Host == d.config.Host {
+ host := viper.GetString(config.Keys.Host)
+ if itemURI.Host == host {
// skip if the reply is from us -- we already have it then
continue
}
diff --git a/internal/federation/federatingdb/db.go b/internal/federation/federatingdb/db.go
index 4b2f92fd4..64e5ad359 100644
--- a/internal/federation/federatingdb/db.go
+++ b/internal/federation/federatingdb/db.go
@@ -25,7 +25,6 @@ import (
"github.com/superseriousbusiness/activity/pub"
"github.com/superseriousbusiness/activity/streams/vocab"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/typeutils"
)
@@ -46,19 +45,17 @@ type federatingDB struct {
locks map[string]*mutex
pool sync.Pool
db db.DB
- config *config.Config
typeConverter typeutils.TypeConverter
}
// New returns a DB interface using the given database and config
-func New(db db.DB, config *config.Config) DB {
+func New(db db.DB) DB {
fdb := federatingDB{
mutex: sync.Mutex{},
locks: make(map[string]*mutex, 100),
pool: sync.Pool{New: func() interface{} { return &mutex{} }},
db: db,
- config: config,
- typeConverter: typeutils.NewConverter(config, db),
+ typeConverter: typeutils.NewConverter(db),
}
go fdb.cleanupLocks()
return &fdb
diff --git a/internal/federation/federatingdb/federatingdb_test.go b/internal/federation/federatingdb/federatingdb_test.go
index 24cbfed98..d51e0a825 100644
--- a/internal/federation/federatingdb/federatingdb_test.go
+++ b/internal/federation/federatingdb/federatingdb_test.go
@@ -22,7 +22,6 @@ import (
"context"
"github.com/stretchr/testify/suite"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/federation/federatingdb"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
@@ -34,7 +33,6 @@ import (
type FederatingDBTestSuite struct {
suite.Suite
- config *config.Config
db db.DB
tc typeutils.TypeConverter
federatingDB federatingdb.DB
@@ -59,13 +57,13 @@ func (suite *FederatingDBTestSuite) SetupSuite() {
suite.testAttachments = testrig.NewTestAttachments()
suite.testStatuses = testrig.NewTestStatuses()
suite.testBlocks = testrig.NewTestBlocks()
- suite.testActivities = testrig.NewTestActivities(suite.testAccounts)
}
func (suite *FederatingDBTestSuite) SetupTest() {
testrig.InitTestLog()
- suite.config = testrig.NewTestConfig()
+ testrig.InitTestConfig()
suite.db = testrig.NewTestDB()
+ suite.testActivities = testrig.NewTestActivities(suite.testAccounts)
suite.tc = testrig.NewTestTypeConverter(suite.db)
suite.federatingDB = testrig.NewTestFederatingDB(suite.db)
testrig.StandardDBSetup(suite.db, suite.testAccounts)
diff --git a/internal/federation/federatingdb/owns.go b/internal/federation/federatingdb/owns.go
index 8846c52bb..2603c9aa2 100644
--- a/internal/federation/federatingdb/owns.go
+++ b/internal/federation/federatingdb/owns.go
@@ -24,6 +24,8 @@ import (
"net/url"
"github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/util"
@@ -42,8 +44,9 @@ func (f *federatingDB) Owns(ctx context.Context, id *url.URL) (bool, error) {
l.Debug("entering Owns")
// if the id host isn't this instance host, we don't own this IRI
- if id.Host != f.config.Host {
- l.Tracef("we DO NOT own activity because the host is %s not %s", id.Host, f.config.Host)
+ host := viper.GetString(config.Keys.Host)
+ if id.Host != host {
+ l.Tracef("we DO NOT own activity because the host is %s not %s", id.Host, host)
return false, nil
}
diff --git a/internal/federation/federatingdb/reject_test.go b/internal/federation/federatingdb/reject_test.go
index 2b213a3f0..9930d83d2 100644
--- a/internal/federation/federatingdb/reject_test.go
+++ b/internal/federation/federatingdb/reject_test.go
@@ -48,7 +48,7 @@ func (suite *RejectTestSuite) TestRejectFollowRequest() {
ID: "01FJ1S8DX3STJJ6CEYPMZ1M0R3",
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
- URI: util.GenerateURIForFollow(followingAccount.Username, "http", "localhost:8080", "01FJ1S8DX3STJJ6CEYPMZ1M0R3"),
+ URI: util.GenerateURIForFollow(followingAccount.Username, "01FJ1S8DX3STJJ6CEYPMZ1M0R3"),
AccountID: followingAccount.ID,
TargetAccountID: followedAccount.ID,
}
diff --git a/internal/federation/federatingdb/update.go b/internal/federation/federatingdb/update.go
index 8f318ce71..1d56b931f 100644
--- a/internal/federation/federatingdb/update.go
+++ b/internal/federation/federatingdb/update.go
@@ -24,8 +24,10 @@ import (
"fmt"
"github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
"github.com/superseriousbusiness/activity/streams/vocab"
"github.com/superseriousbusiness/gotosocial/internal/ap"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/messages"
"github.com/superseriousbusiness/gotosocial/internal/util"
@@ -124,7 +126,8 @@ func (f *federatingDB) Update(ctx context.Context, asType vocab.Type) error {
return fmt.Errorf("UPDATE: error converting to account: %s", err)
}
- if updatedAcct.Domain == f.config.Host {
+ host := viper.GetString(config.Keys.Host)
+ if updatedAcct.Domain == host {
// no need to update local accounts
// in fact, if we do this will break the shit out of things so do NOT
return nil
diff --git a/internal/federation/federatingdb/util.go b/internal/federation/federatingdb/util.go
index f35fbbb2d..afa09e39d 100644
--- a/internal/federation/federatingdb/util.go
+++ b/internal/federation/federatingdb/util.go
@@ -26,9 +26,11 @@ import (
"net/url"
"github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
"github.com/superseriousbusiness/activity/streams"
"github.com/superseriousbusiness/activity/streams/vocab"
"github.com/superseriousbusiness/gotosocial/internal/ap"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/id"
@@ -104,7 +106,7 @@ func (f *federatingDB) NewID(ctx context.Context, t vocab.Type) (idURL *url.URL,
if err != nil {
return nil, err
}
- return url.Parse(util.GenerateURIForFollow(actorAccount.Username, f.config.Protocol, f.config.Host, newID))
+ return url.Parse(util.GenerateURIForFollow(actorAccount.Username, newID))
}
}
}
@@ -207,7 +209,10 @@ func (f *federatingDB) NewID(ctx context.Context, t vocab.Type) (idURL *url.URL,
if err != nil {
return nil, err
}
- return url.Parse(fmt.Sprintf("%s://%s/%s", f.config.Protocol, f.config.Host, newID))
+
+ protocol := viper.GetString(config.Keys.Protocol)
+ host := viper.GetString(config.Keys.Host)
+ return url.Parse(fmt.Sprintf("%s://%s/%s", protocol, host, newID))
}
// ActorForOutbox fetches the actor's IRI for the given outbox IRI.
diff --git a/internal/federation/federator.go b/internal/federation/federator.go
index 280375d7d..8ccae9427 100644
--- a/internal/federation/federator.go
+++ b/internal/federation/federator.go
@@ -24,7 +24,6 @@ import (
"github.com/superseriousbusiness/activity/pub"
"github.com/superseriousbusiness/gotosocial/internal/ap"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/federation/dereferencing"
"github.com/superseriousbusiness/gotosocial/internal/federation/federatingdb"
@@ -73,7 +72,6 @@ type Federator interface {
}
type federator struct {
- config *config.Config
db db.DB
federatingDB federatingdb.DB
clock pub.Clock
@@ -85,12 +83,11 @@ type federator struct {
}
// NewFederator returns a new federator
-func NewFederator(db db.DB, federatingDB federatingdb.DB, transportController transport.Controller, config *config.Config, typeConverter typeutils.TypeConverter, mediaHandler media.Handler) Federator {
- dereferencer := dereferencing.NewDereferencer(config, db, typeConverter, transportController, mediaHandler)
+func NewFederator(db db.DB, federatingDB federatingdb.DB, transportController transport.Controller, typeConverter typeutils.TypeConverter, mediaHandler media.Handler) Federator {
+ dereferencer := dereferencing.NewDereferencer(db, typeConverter, transportController, mediaHandler)
clock := &Clock{}
f := &federator{
- config: config,
db: db,
federatingDB: federatingDB,
clock: &Clock{},
diff --git a/internal/federation/federator_test.go b/internal/federation/federator_test.go
index 7ac149c0d..f907ac00b 100644
--- a/internal/federation/federator_test.go
+++ b/internal/federation/federator_test.go
@@ -30,7 +30,6 @@ import (
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/activity/pub"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/federation"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
@@ -41,7 +40,6 @@ import (
type ProtocolTestSuite struct {
suite.Suite
- config *config.Config
db db.DB
storage *kv.KVStore
typeConverter typeutils.TypeConverter
@@ -52,16 +50,16 @@ type ProtocolTestSuite struct {
// SetupSuite sets some variables on the suite that we can use as consts (more or less) throughout
func (suite *ProtocolTestSuite) SetupSuite() {
// setup standard items
- suite.config = testrig.NewTestConfig()
- suite.db = testrig.NewTestDB()
- testrig.InitTestLog()
suite.storage = testrig.NewTestStorage()
suite.typeConverter = testrig.NewTestTypeConverter(suite.db)
suite.accounts = testrig.NewTestAccounts()
- suite.activities = testrig.NewTestActivities(suite.accounts)
}
func (suite *ProtocolTestSuite) SetupTest() {
+ testrig.InitTestLog()
+ testrig.InitTestConfig()
+ suite.db = testrig.NewTestDB()
+ suite.activities = testrig.NewTestActivities(suite.accounts)
testrig.StandardDBSetup(suite.db, suite.accounts)
}
@@ -80,7 +78,7 @@ func (suite *ProtocolTestSuite) TestPostInboxRequestBodyHook() {
return nil, nil
}), suite.db)
// setup module being tested
- federator := federation.NewFederator(suite.db, testrig.NewTestFederatingDB(suite.db), tc, suite.config, suite.typeConverter, testrig.NewTestMediaHandler(suite.db, suite.storage))
+ federator := federation.NewFederator(suite.db, testrig.NewTestFederatingDB(suite.db), tc, suite.typeConverter, testrig.NewTestMediaHandler(suite.db, suite.storage))
// setup request
ctx := context.Background()
@@ -109,7 +107,7 @@ func (suite *ProtocolTestSuite) TestAuthenticatePostInbox() {
tc := testrig.NewTestTransportController(testrig.NewMockHTTPClient(nil), suite.db)
// now setup module being tested, with the mock transport controller
- federator := federation.NewFederator(suite.db, testrig.NewTestFederatingDB(suite.db), tc, suite.config, suite.typeConverter, testrig.NewTestMediaHandler(suite.db, suite.storage))
+ federator := federation.NewFederator(suite.db, testrig.NewTestFederatingDB(suite.db), tc, suite.typeConverter, testrig.NewTestMediaHandler(suite.db, suite.storage))
request := httptest.NewRequest(http.MethodPost, "http://localhost:8080/users/the_mighty_zork/inbox", nil)
// we need these headers for the request to be validated
diff --git a/internal/gotosocial/gotosocial.go b/internal/gotosocial/gotosocial.go
index e998c16f9..f32b6ad89 100644
--- a/internal/gotosocial/gotosocial.go
+++ b/internal/gotosocial/gotosocial.go
@@ -21,7 +21,6 @@ package gotosocial
import (
"context"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/federation"
"github.com/superseriousbusiness/gotosocial/internal/router"
@@ -42,12 +41,11 @@ type Server interface {
// NewServer returns a new gotosocial server, initialized with the given configuration.
// An error will be returned to the caller if something goes wrong during initialization
// eg., no db or storage connection, port for router already in use, etc.
-func NewServer(db db.DB, apiRouter router.Router, federator federation.Federator, config *config.Config) (Server, error) {
+func NewServer(db db.DB, apiRouter router.Router, federator federation.Federator) (Server, error) {
return &gotosocial{
db: db,
apiRouter: apiRouter,
federator: federator,
- config: config,
}, nil
}
@@ -56,7 +54,6 @@ type gotosocial struct {
db db.DB
apiRouter router.Router
federator federation.Federator
- config *config.Config
}
// Start starts up the gotosocial server. If something goes wrong
diff --git a/internal/media/handler.go b/internal/media/handler.go
index b1063f9aa..24963e404 100644
--- a/internal/media/handler.go
+++ b/internal/media/handler.go
@@ -28,6 +28,7 @@ import (
"codeberg.org/gruf/go-store/kv"
"github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
"github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
@@ -84,15 +85,13 @@ type Handler interface {
}
type mediaHandler struct {
- config *config.Config
db db.DB
storage *kv.KVStore
}
-// New returns a new handler with the given config, db, and storage
-func New(config *config.Config, database db.DB, storage *kv.KVStore) Handler {
+// New returns a new handler with the given db and storage
+func New(database db.DB, storage *kv.KVStore) Handler {
return &mediaHandler{
- config: config,
db: database,
storage: storage,
}
@@ -179,6 +178,8 @@ func (mh *mediaHandler) ProcessAttachment(ctx context.Context, attachmentBytes [
// *gts.Emoji for it, then returns it to the caller. It's the caller's responsibility to put the returned struct
// in the database.
func (mh *mediaHandler) ProcessLocalEmoji(ctx context.Context, emojiBytes []byte, shortcode string) (*gtsmodel.Emoji, error) {
+ keys := config.Keys
+
var clean []byte
var err error
var original *imageAndMeta
@@ -234,7 +235,10 @@ func (mh *mediaHandler) ProcessLocalEmoji(ctx context.Context, emojiBytes []byte
extension := strings.Split(contentType, "/")[1]
// create the urls and storage paths
- URLbase := fmt.Sprintf("%s://%s%s", mh.config.StorageConfig.ServeProtocol, mh.config.StorageConfig.ServeHost, mh.config.StorageConfig.ServeBasePath)
+ serveProtocol := viper.GetString(keys.StorageServeProtocol)
+ serveHost := viper.GetString(keys.StorageServeHost)
+ serveBasePath := viper.GetString(keys.StorageServeBasePath)
+ URLbase := fmt.Sprintf("%s://%s%s", serveProtocol, serveHost, serveBasePath)
// generate an id for the new emoji
newEmojiID, err := id.NewRandomULID()
@@ -244,7 +248,9 @@ func (mh *mediaHandler) ProcessLocalEmoji(ctx context.Context, emojiBytes []byte
// webfinger uri for the emoji -- unrelated to actually serving the image
// will be something like https://example.org/emoji/70a7f3d7-7e35-4098-8ce3-9b5e8203bb9c
- emojiURI := fmt.Sprintf("%s://%s/%s/%s", mh.config.Protocol, mh.config.Host, Emoji, newEmojiID)
+ protocol := viper.GetString(keys.Protocol)
+ host := viper.GetString(keys.Host)
+ emojiURI := fmt.Sprintf("%s://%s/%s/%s", protocol, host, Emoji, newEmojiID)
// serve url and storage path for the original emoji -- can be png or gif
emojiURL := fmt.Sprintf("%s/%s/%s/%s/%s.%s", URLbase, instanceAccount.ID, Emoji, Original, newEmojiID, extension)
diff --git a/internal/media/processicon.go b/internal/media/processicon.go
index 46c721e0c..5f4f8b138 100644
--- a/internal/media/processicon.go
+++ b/internal/media/processicon.go
@@ -24,6 +24,8 @@ import (
"strings"
"time"
+ "github.com/spf13/viper"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/id"
)
@@ -79,7 +81,12 @@ func (mh *mediaHandler) processHeaderOrAvi(imageBytes []byte, contentType string
return nil, err
}
- URLbase := fmt.Sprintf("%s://%s%s", mh.config.StorageConfig.ServeProtocol, mh.config.StorageConfig.ServeHost, mh.config.StorageConfig.ServeBasePath)
+ keys := config.Keys
+ serveProtocol := viper.GetString(keys.StorageServeProtocol)
+ serveHost := viper.GetString(keys.StorageServeHost)
+ serveBasePath := viper.GetString(keys.StorageServeBasePath)
+
+ URLbase := fmt.Sprintf("%s://%s%s", serveProtocol, serveHost, serveBasePath)
originalURL := fmt.Sprintf("%s/%s/%s/original/%s.%s", URLbase, accountID, mediaType, newMediaID, extension)
smallURL := fmt.Sprintf("%s/%s/%s/small/%s.%s", URLbase, accountID, mediaType, newMediaID, extension)
diff --git a/internal/media/processimage.go b/internal/media/processimage.go
index 03bd4eda8..f3b520ad9 100644
--- a/internal/media/processimage.go
+++ b/internal/media/processimage.go
@@ -24,6 +24,8 @@ import (
"strings"
"time"
+ "github.com/spf13/viper"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/id"
)
@@ -67,7 +69,12 @@ func (mh *mediaHandler) processImageAttachment(data []byte, minAttachment *gtsmo
return nil, err
}
- URLbase := fmt.Sprintf("%s://%s%s", mh.config.StorageConfig.ServeProtocol, mh.config.StorageConfig.ServeHost, mh.config.StorageConfig.ServeBasePath)
+ keys := config.Keys
+ serveProtocol := viper.GetString(keys.StorageServeProtocol)
+ serveHost := viper.GetString(keys.StorageServeHost)
+ serveBasePath := viper.GetString(keys.StorageServeBasePath)
+
+ URLbase := fmt.Sprintf("%s://%s%s", serveProtocol, serveHost, serveBasePath)
originalURL := fmt.Sprintf("%s/%s/attachment/original/%s.%s", URLbase, minAttachment.AccountID, newMediaID, extension)
smallURL := fmt.Sprintf("%s/%s/attachment/small/%s.jpeg", URLbase, minAttachment.AccountID, newMediaID) // all thumbnails/smalls are encoded as jpeg
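ProcessLocalEmoji, processHeaderOrAvi, and processImageAttachment each now rebuild the same serve-URL base from three viper lookups. A hypothetical helper (not in the patch) that would consolidate the repeated pattern shown in these media hunks:

    package example

    import (
        "fmt"

        "github.com/spf13/viper"

        "github.com/superseriousbusiness/gotosocial/internal/config"
    )

    // storageURLBase is a hypothetical consolidation of the repeated
    // serve-protocol/host/base-path lookups above.
    func storageURLBase() string {
        keys := config.Keys
        return fmt.Sprintf("%s://%s%s",
            viper.GetString(keys.StorageServeProtocol),
            viper.GetString(keys.StorageServeHost),
            viper.GetString(keys.StorageServeBasePath),
        )
    }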
diff --git a/internal/oauth/clientstore_test.go b/internal/oauth/clientstore_test.go
index b0a36487b..fc74f3ad1 100644
--- a/internal/oauth/clientstore_test.go
+++ b/internal/oauth/clientstore_test.go
@@ -49,6 +49,8 @@ func (suite *PgClientStoreTestSuite) SetupSuite() {
// SetupTest creates a postgres connection and creates the oauth_clients table before each test
func (suite *PgClientStoreTestSuite) SetupTest() {
+ testrig.InitTestLog()
+ testrig.InitTestConfig()
suite.db = testrig.NewTestDB()
testrig.StandardDBSetup(suite.db, nil)
}
diff --git a/internal/oidc/idp.go b/internal/oidc/idp.go
index 3032d15d5..166fc4b85 100644
--- a/internal/oidc/idp.go
+++ b/internal/oidc/idp.go
@@ -23,6 +23,7 @@ import (
"fmt"
"github.com/coreos/go-oidc/v3/oidc"
+ "github.com/spf13/viper"
"github.com/superseriousbusiness/gotosocial/internal/config"
"golang.org/x/oauth2"
)
@@ -54,42 +55,56 @@ type idp struct {
// If the passed config contains a nil value for the OIDCConfig, or OIDCConfig.Enabled
// is set to false, then nil, nil will be returned. If OIDCConfig.Enabled is true,
// then the other OIDC config fields must also be set.
-func NewIDP(ctx context.Context, config *config.Config) (IDP, error) {
+func NewIDP(ctx context.Context) (IDP, error) {
+ keys := config.Keys
- // oidc isn't enabled so we don't need to do anything
- if config.OIDCConfig == nil || !config.OIDCConfig.Enabled {
+ oidcEnabled := viper.GetBool(keys.OIDCEnabled)
+ if !oidcEnabled {
+ // oidc isn't enabled so we don't need to do anything
return nil, nil
}
// validate config fields
- if config.OIDCConfig.IDPName == "" {
+ idpName := viper.GetString(keys.OIDCIdpName)
+ if idpName == "" {
return nil, fmt.Errorf("not set: IDPName")
}
- if config.OIDCConfig.Issuer == "" {
+
+ issuer := viper.GetString(keys.OIDCIssuer)
+ if issuer == "" {
return nil, fmt.Errorf("not set: Issuer")
}
- if config.OIDCConfig.ClientID == "" {
+
+ clientID := viper.GetString(keys.OIDCClientID)
+ if clientID == "" {
return nil, fmt.Errorf("not set: ClientID")
}
- if config.OIDCConfig.ClientSecret == "" {
+
+ clientSecret := viper.GetString(keys.OIDCClientSecret)
+ if clientSecret == "" {
return nil, fmt.Errorf("not set: ClientSecret")
}
- if len(config.OIDCConfig.Scopes) == 0 {
+
+ scopes := viper.GetStringSlice(keys.OIDCScopes)
+ if len(scopes) == 0 {
return nil, fmt.Errorf("not set: Scopes")
}
- provider, err := oidc.NewProvider(ctx, config.OIDCConfig.Issuer)
+ provider, err := oidc.NewProvider(ctx, issuer)
if err != nil {
return nil, err
}
+ protocol := viper.GetString(keys.Protocol)
+ host := viper.GetString(keys.Host)
+
oauth2Config := oauth2.Config{
// client_id and client_secret of the client.
- ClientID: config.OIDCConfig.ClientID,
- ClientSecret: config.OIDCConfig.ClientSecret,
+ ClientID: clientID,
+ ClientSecret: clientSecret,
// The redirectURL.
- RedirectURL: fmt.Sprintf("%s://%s%s", config.Protocol, config.Host, CallbackPath),
+ RedirectURL: fmt.Sprintf("%s://%s%s", protocol, host, CallbackPath),
// Discovery returns the OAuth2 endpoints.
Endpoint: provider.Endpoint(),
@@ -97,14 +112,16 @@ func NewIDP(ctx context.Context, config *config.Config) (IDP, error) {
// "openid" is a required scope for OpenID Connect flows.
//
// Other scopes, such as "groups" can be requested.
- Scopes: config.OIDCConfig.Scopes,
+ Scopes: scopes,
}
// create a config for verifier creation
oidcConf := &oidc.Config{
- ClientID: config.OIDCConfig.ClientID,
+ ClientID: clientID,
}
- if config.OIDCConfig.SkipVerification {
+
+ skipVerification := viper.GetBool(keys.OIDCSkipVerification)
+ if skipVerification {
oidcConf.SkipClientIDCheck = true
oidcConf.SkipExpiryCheck = true
oidcConf.SkipIssuerCheck = true
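NewIDP likewise reads everything from viper and returns nil, nil when OIDC is disabled. A usage sketch under that assumption follows; the keys are those shown in the hunk, while the literal values are placeholders:

    package example

    import (
        "context"

        "github.com/spf13/viper"

        "github.com/superseriousbusiness/gotosocial/internal/config"
        "github.com/superseriousbusiness/gotosocial/internal/oidc"
    )

    func newIDPSketch(ctx context.Context) (oidc.IDP, error) {
        keys := config.Keys

        // Placeholder values; in practice these come from flags or the config file.
        viper.Set(keys.OIDCEnabled, true)
        viper.Set(keys.OIDCIdpName, "example-idp")
        viper.Set(keys.OIDCIssuer, "https://idp.example.org")
        viper.Set(keys.OIDCClientID, "some-client-id")
        viper.Set(keys.OIDCClientSecret, "some-client-secret")
        viper.Set(keys.OIDCScopes, []string{"openid", "email", "profile"})

        // Returns nil, nil if keys.OIDCEnabled is false.
        return oidc.NewIDP(ctx)
    }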
diff --git a/internal/processing/account/account.go b/internal/processing/account/account.go
index 4e807540c..c37261adc 100644
--- a/internal/processing/account/account.go
+++ b/internal/processing/account/account.go
@@ -23,7 +23,6 @@ import (
"mime/multipart"
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/federation"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
@@ -78,7 +77,6 @@ type Processor interface {
type processor struct {
tc typeutils.TypeConverter
- config *config.Config
mediaHandler media.Handler
fromClientAPI chan messages.FromClientAPI
oauthServer oauth.Server
@@ -89,15 +87,14 @@ type processor struct {
}
// New returns a new account processor.
-func New(db db.DB, tc typeutils.TypeConverter, mediaHandler media.Handler, oauthServer oauth.Server, fromClientAPI chan messages.FromClientAPI, federator federation.Federator, config *config.Config) Processor {
+func New(db db.DB, tc typeutils.TypeConverter, mediaHandler media.Handler, oauthServer oauth.Server, fromClientAPI chan messages.FromClientAPI, federator federation.Federator) Processor {
return &processor{
tc: tc,
- config: config,
mediaHandler: mediaHandler,
fromClientAPI: fromClientAPI,
oauthServer: oauthServer,
filter: visibility.NewFilter(db),
- formatter: text.NewFormatter(config, db),
+ formatter: text.NewFormatter(db),
db: db,
federator: federator,
}
diff --git a/internal/processing/account/account_test.go b/internal/processing/account/account_test.go
index 9bc97a77b..17893b4c0 100644
--- a/internal/processing/account/account_test.go
+++ b/internal/processing/account/account_test.go
@@ -22,7 +22,6 @@ import (
"codeberg.org/gruf/go-store/kv"
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/activity/pub"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
@@ -39,7 +38,6 @@ import (
type AccountStandardTestSuite struct {
// standard suite interfaces
suite.Suite
- config *config.Config
db db.DB
tc typeutils.TypeConverter
storage *kv.KVStore
@@ -76,9 +74,10 @@ func (suite *AccountStandardTestSuite) SetupSuite() {
}
func (suite *AccountStandardTestSuite) SetupTest() {
- suite.config = testrig.NewTestConfig()
- suite.db = testrig.NewTestDB()
testrig.InitTestLog()
+ testrig.InitTestConfig()
+
+ suite.db = testrig.NewTestDB()
suite.tc = testrig.NewTestTypeConverter(suite.db)
suite.storage = testrig.NewTestStorage()
suite.mediaHandler = testrig.NewTestMediaHandler(suite.db, suite.storage)
@@ -89,7 +88,7 @@ func (suite *AccountStandardTestSuite) SetupTest() {
suite.federator = testrig.NewTestFederator(suite.db, suite.transportController, suite.storage)
suite.sentEmails = make(map[string]string)
suite.emailSender = testrig.NewEmailSender("../../../web/template/", suite.sentEmails)
- suite.accountProcessor = account.New(suite.db, suite.tc, suite.mediaHandler, suite.oauthServer, suite.fromClientAPIChan, suite.federator, suite.config)
+ suite.accountProcessor = account.New(suite.db, suite.tc, suite.mediaHandler, suite.oauthServer, suite.fromClientAPIChan, suite.federator)
testrig.StandardDBSetup(suite.db, nil)
testrig.StandardStorageSetup(suite.storage, "../../../testrig/media")
}
diff --git a/internal/processing/account/create.go b/internal/processing/account/create.go
index a13e84a78..3d024417b 100644
--- a/internal/processing/account/create.go
+++ b/internal/processing/account/create.go
@@ -23,9 +23,11 @@ import (
"fmt"
"github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
"github.com/superseriousbusiness/gotosocial/internal/ap"
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/messages"
"github.com/superseriousbusiness/gotosocial/internal/text"
@@ -51,14 +53,18 @@ func (p *processor) Create(ctx context.Context, applicationToken oauth2.TokenInf
return nil, fmt.Errorf("username %s in use", form.Username)
}
+ keys := config.Keys
+ reasonRequired := viper.GetBool(keys.AccountsReasonRequired)
+ approvalRequired := viper.GetBool(keys.AccountsApprovalRequired)
+
// don't store a reason if we don't require one
reason := form.Reason
- if !p.config.AccountsConfig.ReasonRequired {
+ if !reasonRequired {
reason = ""
}
l.Trace("creating new username and account")
- user, err := p.db.NewSignup(ctx, form.Username, text.RemoveHTML(reason), p.config.AccountsConfig.RequireApproval, form.Email, form.Password, form.IP, form.Locale, application.ID, false, false)
+ user, err := p.db.NewSignup(ctx, form.Username, text.RemoveHTML(reason), approvalRequired, form.Email, form.Password, form.IP, form.Locale, application.ID, false, false)
if err != nil {
return nil, fmt.Errorf("error creating new signup in the database: %s", err)
}
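The account Create handler now gates the sign-up reason and approval behaviour on two booleans read from viper. A condensed sketch of just that decision, using the keys shown above:

    package example

    import (
        "github.com/spf13/viper"

        "github.com/superseriousbusiness/gotosocial/internal/config"
    )

    // signupPolicy mirrors the checks in (p *processor).Create above.
    func signupPolicy(formReason string) (reason string, requireApproval bool) {
        keys := config.Keys

        reason = formReason
        if !viper.GetBool(keys.AccountsReasonRequired) {
            // don't store a reason if we don't require one
            reason = ""
        }
        return reason, viper.GetBool(keys.AccountsApprovalRequired)
    }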
diff --git a/internal/processing/account/createblock.go b/internal/processing/account/createblock.go
index 347f19bee..6785ffed1 100644
--- a/internal/processing/account/createblock.go
+++ b/internal/processing/account/createblock.go
@@ -57,7 +57,7 @@ func (p *processor) BlockCreate(ctx context.Context, requestingAccount *gtsmodel
block.Account = requestingAccount
block.TargetAccountID = targetAccountID
block.TargetAccount = targetAccount
- block.URI = util.GenerateURIForBlock(requestingAccount.Username, p.config.Protocol, p.config.Host, newBlockID)
+ block.URI = util.GenerateURIForBlock(requestingAccount.Username, newBlockID)
// whack it in the database
if err := p.db.Put(ctx, block); err != nil {
diff --git a/internal/processing/account/createfollow.go b/internal/processing/account/createfollow.go
index d3ca386ed..9b082187b 100644
--- a/internal/processing/account/createfollow.go
+++ b/internal/processing/account/createfollow.go
@@ -76,7 +76,7 @@ func (p *processor) FollowCreate(ctx context.Context, requestingAccount *gtsmode
AccountID: requestingAccount.ID,
TargetAccountID: form.ID,
ShowReblogs: true,
- URI: util.GenerateURIForFollow(requestingAccount.Username, p.config.Protocol, p.config.Host, newFollowID),
+ URI: util.GenerateURIForFollow(requestingAccount.Username, newFollowID),
Notify: false,
}
if form.Reblogs != nil {
diff --git a/internal/processing/account/update.go b/internal/processing/account/update.go
index 54bf372e7..ca386eb39 100644
--- a/internal/processing/account/update.go
+++ b/internal/processing/account/update.go
@@ -23,12 +23,15 @@ import (
"context"
"errors"
"fmt"
- "github.com/sirupsen/logrus"
"io"
"mime/multipart"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
+
"github.com/superseriousbusiness/gotosocial/internal/ap"
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/messages"
@@ -135,8 +138,9 @@ func (p *processor) Update(ctx context.Context, account *gtsmodel.Account, form
// the account's new avatar image.
func (p *processor) UpdateAvatar(ctx context.Context, avatar *multipart.FileHeader, accountID string) (*gtsmodel.MediaAttachment, error) {
var err error
- if int(avatar.Size) > p.config.MediaConfig.MaxImageSize {
- err = fmt.Errorf("avatar with size %d exceeded max image size of %d bytes", avatar.Size, p.config.MediaConfig.MaxImageSize)
+ maxImageSize := viper.GetInt(config.Keys.MediaImageMaxSize)
+ if int(avatar.Size) > maxImageSize {
+ err = fmt.Errorf("avatar with size %d exceeded max image size of %d bytes", avatar.Size, maxImageSize)
return nil, err
}
f, err := avatar.Open()
@@ -168,8 +172,9 @@ func (p *processor) UpdateAvatar(ctx context.Context, avatar *multipart.FileHead
// the account's new header image.
func (p *processor) UpdateHeader(ctx context.Context, header *multipart.FileHeader, accountID string) (*gtsmodel.MediaAttachment, error) {
var err error
- if int(header.Size) > p.config.MediaConfig.MaxImageSize {
- err = fmt.Errorf("header with size %d exceeded max image size of %d bytes", header.Size, p.config.MediaConfig.MaxImageSize)
+ maxImageSize := viper.GetInt(config.Keys.MediaImageMaxSize)
+ if int(header.Size) > maxImageSize {
+ err = fmt.Errorf("header with size %d exceeded max image size of %d bytes", header.Size, maxImageSize)
return nil, err
}
f, err := header.Open()
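UpdateAvatar and UpdateHeader now apply the same size guard, each reading the limit from viper. A small sketch of that check factored out (a hypothetical helper, not in the patch):

    package example

    import (
        "fmt"

        "github.com/spf13/viper"

        "github.com/superseriousbusiness/gotosocial/internal/config"
    )

    // checkImageSize is a hypothetical helper mirroring the guard in
    // UpdateAvatar/UpdateHeader above.
    func checkImageSize(kind string, size int64) error {
        maxImageSize := viper.GetInt(config.Keys.MediaImageMaxSize)
        if int(size) > maxImageSize {
            return fmt.Errorf("%s with size %d exceeded max image size of %d bytes", kind, size, maxImageSize)
        }
        return nil
    }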
diff --git a/internal/processing/admin/admin.go b/internal/processing/admin/admin.go
index 1aaaced5e..8b46ab6b7 100644
--- a/internal/processing/admin/admin.go
+++ b/internal/processing/admin/admin.go
@@ -23,7 +23,6 @@ import (
"mime/multipart"
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
@@ -44,17 +43,15 @@ type Processor interface {
type processor struct {
tc typeutils.TypeConverter
- config *config.Config
mediaHandler media.Handler
fromClientAPI chan messages.FromClientAPI
db db.DB
}
// New returns a new admin processor.
-func New(db db.DB, tc typeutils.TypeConverter, mediaHandler media.Handler, fromClientAPI chan messages.FromClientAPI, config *config.Config) Processor {
+func New(db db.DB, tc typeutils.TypeConverter, mediaHandler media.Handler, fromClientAPI chan messages.FromClientAPI) Processor {
return &processor{
tc: tc,
- config: config,
mediaHandler: mediaHandler,
fromClientAPI: fromClientAPI,
db: db,
diff --git a/internal/processing/blocks.go b/internal/processing/blocks.go
index 1144579a4..ad5553be9 100644
--- a/internal/processing/blocks.go
+++ b/internal/processing/blocks.go
@@ -23,7 +23,9 @@ import (
"fmt"
"net/url"
+ "github.com/spf13/viper"
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
@@ -62,17 +64,20 @@ func (p *processor) packageBlocksResponse(accounts []*apimodel.Account, path str
// prepare the next and previous links
if len(accounts) != 0 {
+ protocol := viper.GetString(config.Keys.Protocol)
+ host := viper.GetString(config.Keys.Host)
+
nextLink := &url.URL{
- Scheme: p.config.Protocol,
- Host: p.config.Host,
+ Scheme: protocol,
+ Host: host,
Path: path,
RawQuery: fmt.Sprintf("limit=%d&max_id=%s", limit, nextMaxID),
}
next := fmt.Sprintf("<%s>; rel=\"next\"", nextLink.String())
prevLink := &url.URL{
- Scheme: p.config.Protocol,
- Host: p.config.Host,
+ Scheme: protocol,
+ Host: host,
Path: path,
RawQuery: fmt.Sprintf("limit=%d&min_id=%s", limit, prevMinID),
}
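
The same protocol/host substitution recurs in the timeline paging code later in this patch. As a self-contained sketch of the shared pattern (the function name and the `rel="prev"` formatting are assumptions, since the prev formatting is outside the hunk shown):

```go
package processing

import (
	"fmt"
	"net/url"

	"github.com/spf13/viper"

	"github.com/superseriousbusiness/gotosocial/internal/config"
)

// pagingLinks builds next/prev Link header values from the instance protocol
// and host, now read from viper rather than from a config struct.
func pagingLinks(path string, limit int, nextMaxID, prevMinID string) (string, string) {
	protocol := viper.GetString(config.Keys.Protocol)
	host := viper.GetString(config.Keys.Host)

	nextLink := &url.URL{
		Scheme:   protocol,
		Host:     host,
		Path:     path,
		RawQuery: fmt.Sprintf("limit=%d&max_id=%s", limit, nextMaxID),
	}
	prevLink := &url.URL{
		Scheme:   protocol,
		Host:     host,
		Path:     path,
		RawQuery: fmt.Sprintf("limit=%d&min_id=%s", limit, prevMinID),
	}

	next := fmt.Sprintf("<%s>; rel=\"next\"", nextLink.String())
	prev := fmt.Sprintf("<%s>; rel=\"prev\"", prevLink.String())
	return next, prev
}
```
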
diff --git a/internal/processing/federation/federation.go b/internal/processing/federation/federation.go
index b050406c5..a3e25c2aa 100644
--- a/internal/processing/federation/federation.go
+++ b/internal/processing/federation/federation.go
@@ -24,7 +24,6 @@ import (
"net/url"
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/federation"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
@@ -83,7 +82,6 @@ type Processor interface {
type processor struct {
db db.DB
- config *config.Config
federator federation.Federator
tc typeutils.TypeConverter
filter visibility.Filter
@@ -91,10 +89,9 @@ type processor struct {
}
// New returns a new federation processor.
-func New(db db.DB, tc typeutils.TypeConverter, config *config.Config, federator federation.Federator, fromFederator chan messages.FromFederator) Processor {
+func New(db db.DB, tc typeutils.TypeConverter, federator federation.Federator, fromFederator chan messages.FromFederator) Processor {
return &processor{
db: db,
- config: config,
federator: federator,
tc: tc,
filter: visibility.NewFilter(db),
diff --git a/internal/processing/federation/getnodeinfo.go b/internal/processing/federation/getnodeinfo.go
index 4eb8ad3b7..78e03a744 100644
--- a/internal/processing/federation/getnodeinfo.go
+++ b/internal/processing/federation/getnodeinfo.go
@@ -23,7 +23,9 @@ import (
"fmt"
"net/http"
+ "github.com/spf13/viper"
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
)
@@ -38,29 +40,35 @@ var (
)
func (p *processor) GetNodeInfoRel(ctx context.Context, request *http.Request) (*apimodel.WellKnownResponse, gtserror.WithCode) {
+ protocol := viper.GetString(config.Keys.Protocol)
+ host := viper.GetString(config.Keys.Host)
+
return &apimodel.WellKnownResponse{
Links: []apimodel.Link{
{
Rel: nodeInfoRel,
- Href: fmt.Sprintf("%s://%s/nodeinfo/%s", p.config.Protocol, p.config.Host, nodeInfoVersion),
+ Href: fmt.Sprintf("%s://%s/nodeinfo/%s", protocol, host, nodeInfoVersion),
},
},
}, nil
}
func (p *processor) GetNodeInfo(ctx context.Context, request *http.Request) (*apimodel.Nodeinfo, gtserror.WithCode) {
+ openRegistration := viper.GetBool(config.Keys.AccountsRegistrationOpen)
+ softwareVersion := viper.GetString(config.Keys.SoftwareVersion)
+
return &apimodel.Nodeinfo{
Version: nodeInfoVersion,
Software: apimodel.NodeInfoSoftware{
Name: nodeInfoSoftwareName,
- Version: p.config.SoftwareVersion,
+ Version: softwareVersion,
},
Protocols: nodeInfoProtocols,
Services: apimodel.NodeInfoServices{
Inbound: []string{},
Outbound: []string{},
},
- OpenRegistrations: p.config.AccountsConfig.OpenRegistration,
+ OpenRegistrations: openRegistration,
Usage: apimodel.NodeInfoUsage{
Users: apimodel.NodeInfoUsers{},
},
diff --git a/internal/processing/federation/getwebfinger.go b/internal/processing/federation/getwebfinger.go
index aaa7687be..e16b3e139 100644
--- a/internal/processing/federation/getwebfinger.go
+++ b/internal/processing/federation/getwebfinger.go
@@ -22,7 +22,9 @@ import (
"context"
"fmt"
+ "github.com/spf13/viper"
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
)
@@ -41,9 +43,11 @@ func (p *processor) GetWebfingerAccount(ctx context.Context, requestedUsername s
return nil, gtserror.NewErrorNotFound(fmt.Errorf("database error getting account with username %s: %s", requestedUsername, err))
}
+ accountDomain := viper.GetString(config.Keys.AccountDomain)
+
// return the webfinger representation
return &apimodel.WellKnownResponse{
- Subject: fmt.Sprintf("%s:%s@%s", webfingerAccount, requestedAccount.Username, p.config.AccountDomain),
+ Subject: fmt.Sprintf("%s:%s@%s", webfingerAccount, requestedAccount.Username, accountDomain),
Aliases: []string{
requestedAccount.URI,
requestedAccount.URL,
diff --git a/internal/processing/instance.go b/internal/processing/instance.go
index 75d17d13a..995a841e3 100644
--- a/internal/processing/instance.go
+++ b/internal/processing/instance.go
@@ -22,7 +22,9 @@ import (
"context"
"fmt"
+ "github.com/spf13/viper"
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
@@ -33,7 +35,7 @@ import (
func (p *processor) InstanceGet(ctx context.Context, domain string) (*apimodel.Instance, gtserror.WithCode) {
	i := &gtsmodel.Instance{}
if err := p.db.GetWhere(ctx, []db.Where{{Key: "domain", Value: domain}}, i); err != nil {
- return nil, gtserror.NewErrorInternalError(fmt.Errorf("db error fetching instance %s: %s", p.config.Host, err))
+ return nil, gtserror.NewErrorInternalError(fmt.Errorf("db error fetching instance %s: %s", domain, err))
}
ai, err := p.tc.InstanceToAPIInstance(ctx, i)
@@ -47,14 +49,15 @@ func (p *processor) InstanceGet(ctx context.Context, domain string) (*apimodel.I
func (p *processor) InstancePatch(ctx context.Context, form *apimodel.InstanceSettingsUpdateRequest) (*apimodel.Instance, gtserror.WithCode) {
// fetch the instance entry from the db for processing
	i := &gtsmodel.Instance{}
- if err := p.db.GetWhere(ctx, []db.Where{{Key: "domain", Value: p.config.Host}}, i); err != nil {
- return nil, gtserror.NewErrorInternalError(fmt.Errorf("db error fetching instance %s: %s", p.config.Host, err))
+ host := viper.GetString(config.Keys.Host)
+ if err := p.db.GetWhere(ctx, []db.Where{{Key: "domain", Value: host}}, i); err != nil {
+ return nil, gtserror.NewErrorInternalError(fmt.Errorf("db error fetching instance %s: %s", host, err))
}
// fetch the instance account from the db for processing
ia, err := p.db.GetInstanceAccount(ctx, "")
if err != nil {
- return nil, gtserror.NewErrorInternalError(fmt.Errorf("db error fetching instance account %s: %s", p.config.Host, err))
+ return nil, gtserror.NewErrorInternalError(fmt.Errorf("db error fetching instance account %s: %s", host, err))
}
// validate & update site title if it's set on the form
@@ -148,7 +151,7 @@ func (p *processor) InstancePatch(ctx context.Context, form *apimodel.InstanceSe
}
if err := p.db.UpdateByPrimaryKey(ctx, i); err != nil {
- return nil, gtserror.NewErrorInternalError(fmt.Errorf("db error updating instance %s: %s", p.config.Host, err))
+ return nil, gtserror.NewErrorInternalError(fmt.Errorf("db error updating instance %s: %s", host, err))
}
ai, err := p.tc.InstanceToAPIInstance(ctx, i)
diff --git a/internal/processing/media/media.go b/internal/processing/media/media.go
index cf8ec8562..54dbd1478 100644
--- a/internal/processing/media/media.go
+++ b/internal/processing/media/media.go
@@ -23,7 +23,6 @@ import (
"codeberg.org/gruf/go-store/kv"
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
@@ -44,17 +43,15 @@ type Processor interface {
type processor struct {
tc typeutils.TypeConverter
- config *config.Config
mediaHandler media.Handler
storage *kv.KVStore
db db.DB
}
// New returns a new media processor.
-func New(db db.DB, tc typeutils.TypeConverter, mediaHandler media.Handler, storage *kv.KVStore, config *config.Config) Processor {
+func New(db db.DB, tc typeutils.TypeConverter, mediaHandler media.Handler, storage *kv.KVStore) Processor {
return &processor{
tc: tc,
- config: config,
mediaHandler: mediaHandler,
storage: storage,
db: db,
diff --git a/internal/processing/processor.go b/internal/processing/processor.go
index a65ef23d9..ebfabb0de 100644
--- a/internal/processing/processor.go
+++ b/internal/processing/processor.go
@@ -26,7 +26,6 @@ import (
"codeberg.org/gruf/go-store/kv"
"github.com/sirupsen/logrus"
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
@@ -234,7 +233,6 @@ type processor struct {
fromFederator chan messages.FromFederator
federator federation.Federator
stop chan interface{}
- config *config.Config
tc typeutils.TypeConverter
oauthServer oauth.Server
mediaHandler media.Handler
@@ -258,7 +256,6 @@ type processor struct {
// NewProcessor returns a new Processor.
func NewProcessor(
- config *config.Config,
tc typeutils.TypeConverter,
federator federation.Federator,
oauthServer oauth.Server,
@@ -270,20 +267,19 @@ func NewProcessor(
fromClientAPI := make(chan messages.FromClientAPI, 1000)
fromFederator := make(chan messages.FromFederator, 1000)
- statusProcessor := status.New(db, tc, config, fromClientAPI)
+ statusProcessor := status.New(db, tc, fromClientAPI)
streamingProcessor := streaming.New(db, oauthServer)
- accountProcessor := account.New(db, tc, mediaHandler, oauthServer, fromClientAPI, federator, config)
- adminProcessor := admin.New(db, tc, mediaHandler, fromClientAPI, config)
- mediaProcessor := mediaProcessor.New(db, tc, mediaHandler, storage, config)
- userProcessor := user.New(db, emailSender, config)
- federationProcessor := federationProcessor.New(db, tc, config, federator, fromFederator)
+ accountProcessor := account.New(db, tc, mediaHandler, oauthServer, fromClientAPI, federator)
+ adminProcessor := admin.New(db, tc, mediaHandler, fromClientAPI)
+ mediaProcessor := mediaProcessor.New(db, tc, mediaHandler, storage)
+ userProcessor := user.New(db, emailSender)
+ federationProcessor := federationProcessor.New(db, tc, federator, fromFederator)
return &processor{
fromClientAPI: fromClientAPI,
fromFederator: fromFederator,
federator: federator,
stop: make(chan interface{}),
- config: config,
tc: tc,
oauthServer: oauthServer,
mediaHandler: mediaHandler,
diff --git a/internal/processing/processor_test.go b/internal/processing/processor_test.go
index 181cfa63a..2ed1e1d44 100644
--- a/internal/processing/processor_test.go
+++ b/internal/processing/processor_test.go
@@ -29,7 +29,6 @@ import (
"codeberg.org/gruf/go-store/kv"
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/activity/streams"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
@@ -46,7 +45,6 @@ import (
type ProcessingStandardTestSuite struct {
// standard suite interfaces
suite.Suite
- config *config.Config
db db.DB
storage *kv.KVStore
typeconverter typeutils.TypeConverter
@@ -93,14 +91,15 @@ func (suite *ProcessingStandardTestSuite) SetupSuite() {
Account: suite.testAccounts["local_account_1"],
},
}
- suite.testActivities = testrig.NewTestActivities(suite.testAccounts)
suite.testBlocks = testrig.NewTestBlocks()
}
func (suite *ProcessingStandardTestSuite) SetupTest() {
testrig.InitTestLog()
- suite.config = testrig.NewTestConfig()
+ testrig.InitTestConfig()
+
suite.db = testrig.NewTestDB()
+ suite.testActivities = testrig.NewTestActivities(suite.testAccounts)
suite.storage = testrig.NewTestStorage()
suite.typeconverter = testrig.NewTestTypeConverter(suite.db)
@@ -223,7 +222,7 @@ func (suite *ProcessingStandardTestSuite) SetupTest() {
suite.timelineManager = testrig.NewTestTimelineManager(suite.db)
suite.emailSender = testrig.NewEmailSender("../../web/template/", nil)
- suite.processor = processing.NewProcessor(suite.config, suite.typeconverter, suite.federator, suite.oauthServer, suite.mediaHandler, suite.storage, suite.timelineManager, suite.db, suite.emailSender)
+ suite.processor = processing.NewProcessor(suite.typeconverter, suite.federator, suite.oauthServer, suite.mediaHandler, suite.storage, suite.timelineManager, suite.db, suite.emailSender)
testrig.StandardDBSetup(suite.db, suite.testAccounts)
testrig.StandardStorageSetup(suite.storage, "../../testrig/media")
diff --git a/internal/processing/search.go b/internal/processing/search.go
index fbcfb02be..a7a2b3109 100644
--- a/internal/processing/search.go
+++ b/internal/processing/search.go
@@ -25,7 +25,9 @@ import (
"strings"
"github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
@@ -164,7 +166,8 @@ func (p *processor) searchAccountByMention(ctx context.Context, authed *oauth.Au
// if it's a local account we can skip a whole bunch of stuff
	maybeAcct := &gtsmodel.Account{}
- if domain == p.config.Host {
+ host := viper.GetString(config.Keys.Host)
+ if domain == host {
maybeAcct, err = p.db.GetLocalAccountByUsername(ctx, username)
if err != nil {
return nil, fmt.Errorf("searchAccountByMention: error getting local account by username: %s", err)
diff --git a/internal/processing/status/create.go b/internal/processing/status/create.go
index 655be5b17..9bcb32b78 100644
--- a/internal/processing/status/create.go
+++ b/internal/processing/status/create.go
@@ -34,7 +34,7 @@ import (
)
func (p *processor) Create(ctx context.Context, account *gtsmodel.Account, application *gtsmodel.Application, form *apimodel.AdvancedStatusCreateForm) (*apimodel.Status, gtserror.WithCode) {
- uris := util.GenerateURIsForAccount(account.Username, p.config.Protocol, p.config.Host)
+ uris := util.GenerateURIsForAccount(account.Username)
thisStatusID, err := id.NewULID()
if err != nil {
return nil, gtserror.NewErrorInternalError(err)
diff --git a/internal/processing/status/fave.go b/internal/processing/status/fave.go
index 571e0715c..581caf055 100644
--- a/internal/processing/status/fave.go
+++ b/internal/processing/status/fave.go
@@ -76,7 +76,7 @@ func (p *processor) Fave(ctx context.Context, requestingAccount *gtsmodel.Accoun
TargetAccount: targetStatus.Account,
StatusID: targetStatus.ID,
Status: targetStatus,
- URI: util.GenerateURIForLike(requestingAccount.Username, p.config.Protocol, p.config.Host, thisFaveID),
+ URI: util.GenerateURIForLike(requestingAccount.Username, thisFaveID),
}
if err := p.db.Put(ctx, gtsFave); err != nil {
diff --git a/internal/processing/status/status.go b/internal/processing/status/status.go
index 666c237b7..da0abd6f4 100644
--- a/internal/processing/status/status.go
+++ b/internal/processing/status/status.go
@@ -22,7 +22,6 @@ import (
"context"
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
@@ -71,7 +70,6 @@ type Processor interface {
type processor struct {
tc typeutils.TypeConverter
- config *config.Config
db db.DB
filter visibility.Filter
formatter text.Formatter
@@ -79,13 +77,12 @@ type processor struct {
}
// New returns a new status processor.
-func New(db db.DB, tc typeutils.TypeConverter, config *config.Config, fromClientAPI chan messages.FromClientAPI) Processor {
+func New(db db.DB, tc typeutils.TypeConverter, fromClientAPI chan messages.FromClientAPI) Processor {
return &processor{
tc: tc,
- config: config,
db: db,
filter: visibility.NewFilter(db),
- formatter: text.NewFormatter(config, db),
+ formatter: text.NewFormatter(db),
fromClientAPI: fromClientAPI,
}
}
diff --git a/internal/processing/status/status_test.go b/internal/processing/status/status_test.go
index c5c439057..2ed37bf2a 100644
--- a/internal/processing/status/status_test.go
+++ b/internal/processing/status/status_test.go
@@ -20,18 +20,16 @@ package status_test
import (
"github.com/stretchr/testify/suite"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/messages"
"github.com/superseriousbusiness/gotosocial/internal/processing/status"
"github.com/superseriousbusiness/gotosocial/internal/typeutils"
+ "github.com/superseriousbusiness/gotosocial/testrig"
)
-// nolint
type StatusStandardTestSuite struct {
suite.Suite
- config *config.Config
db db.DB
typeConverter typeutils.TypeConverter
fromClientAPIChan chan messages.FromClientAPI
@@ -50,3 +48,31 @@ type StatusStandardTestSuite struct {
// module being tested
status status.Processor
}
+
+func (suite *StatusStandardTestSuite) SetupSuite() {
+ suite.testTokens = testrig.NewTestTokens()
+ suite.testClients = testrig.NewTestClients()
+ suite.testApplications = testrig.NewTestApplications()
+ suite.testUsers = testrig.NewTestUsers()
+ suite.testAccounts = testrig.NewTestAccounts()
+ suite.testAttachments = testrig.NewTestAttachments()
+ suite.testStatuses = testrig.NewTestStatuses()
+ suite.testTags = testrig.NewTestTags()
+ suite.testMentions = testrig.NewTestMentions()
+}
+
+func (suite *StatusStandardTestSuite) SetupTest() {
+ testrig.InitTestLog()
+ testrig.InitTestConfig()
+
+ suite.db = testrig.NewTestDB()
+ suite.typeConverter = testrig.NewTestTypeConverter(suite.db)
+ suite.fromClientAPIChan = make(chan messages.FromClientAPI, 100)
+ suite.status = status.New(suite.db, suite.typeConverter, suite.fromClientAPIChan)
+
+ testrig.StandardDBSetup(suite.db, nil)
+}
+
+func (suite *StatusStandardTestSuite) TearDownTest() {
+ testrig.StandardDBTeardown(suite.db)
+}
diff --git a/internal/processing/status/util_test.go b/internal/processing/status/util_test.go
index 37e2f2dfc..4ec66a4f7 100644
--- a/internal/processing/status/util_test.go
+++ b/internal/processing/status/util_test.go
@@ -27,9 +27,6 @@ import (
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/api/model"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
- "github.com/superseriousbusiness/gotosocial/internal/messages"
- "github.com/superseriousbusiness/gotosocial/internal/processing/status"
- "github.com/superseriousbusiness/gotosocial/testrig"
)
const statusText1 = `Another test @foss_satan@fossbros-anonymous.io
@@ -52,33 +49,6 @@ type UtilTestSuite struct {
StatusStandardTestSuite
}
-func (suite *UtilTestSuite) SetupSuite() {
- suite.testTokens = testrig.NewTestTokens()
- suite.testClients = testrig.NewTestClients()
- suite.testApplications = testrig.NewTestApplications()
- suite.testUsers = testrig.NewTestUsers()
- suite.testAccounts = testrig.NewTestAccounts()
- suite.testAttachments = testrig.NewTestAttachments()
- suite.testStatuses = testrig.NewTestStatuses()
- suite.testTags = testrig.NewTestTags()
- suite.testMentions = testrig.NewTestMentions()
-}
-
-func (suite *UtilTestSuite) SetupTest() {
- suite.config = testrig.NewTestConfig()
- suite.db = testrig.NewTestDB()
- testrig.InitTestLog()
- suite.typeConverter = testrig.NewTestTypeConverter(suite.db)
- suite.fromClientAPIChan = make(chan messages.FromClientAPI, 100)
- suite.status = status.New(suite.db, suite.typeConverter, suite.config, suite.fromClientAPIChan)
-
- testrig.StandardDBSetup(suite.db, nil)
-}
-
-func (suite *UtilTestSuite) TearDownTest() {
- testrig.StandardDBTeardown(suite.db)
-}
-
func (suite *UtilTestSuite) TestProcessMentions1() {
creatingAccount := suite.testAccounts["local_account_1"]
mentionedAccount := suite.testAccounts["remote_account_1"]
diff --git a/internal/processing/streaming/streaming_test.go b/internal/processing/streaming/streaming_test.go
index cbb899d12..ac143636f 100644
--- a/internal/processing/streaming/streaming_test.go
+++ b/internal/processing/streaming/streaming_test.go
@@ -38,11 +38,13 @@ type StreamingTestSuite struct {
}
func (suite *StreamingTestSuite) SetupTest() {
+ testrig.InitTestLog()
+ testrig.InitTestConfig()
+
suite.testAccounts = testrig.NewTestAccounts()
suite.testTokens = testrig.NewTestTokens()
suite.db = testrig.NewTestDB()
suite.oauthServer = testrig.NewTestOauthServer(suite.db)
- testrig.InitTestLog()
suite.streamingProcessor = streaming.New(suite.db, suite.oauthServer)
testrig.StandardDBSetup(suite.db, suite.testAccounts)
diff --git a/internal/processing/timeline.go b/internal/processing/timeline.go
index 64238225f..b3a4a80ae 100644
--- a/internal/processing/timeline.go
+++ b/internal/processing/timeline.go
@@ -21,10 +21,13 @@ package processing
import (
"context"
"fmt"
- "github.com/sirupsen/logrus"
"net/url"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
+
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
@@ -39,17 +42,20 @@ func (p *processor) packageStatusResponse(statuses []*apimodel.Status, path stri
// prepare the next and previous links
if len(statuses) != 0 {
+ protocol := viper.GetString(config.Keys.Protocol)
+ host := viper.GetString(config.Keys.Host)
+
nextLink := &url.URL{
- Scheme: p.config.Protocol,
- Host: p.config.Host,
+ Scheme: protocol,
+ Host: host,
Path: path,
RawQuery: fmt.Sprintf("limit=%d&max_id=%s", limit, nextMaxID),
}
next := fmt.Sprintf("<%s>; rel=\"next\"", nextLink.String())
prevLink := &url.URL{
- Scheme: p.config.Protocol,
- Host: p.config.Host,
+ Scheme: protocol,
+ Host: host,
Path: path,
RawQuery: fmt.Sprintf("limit=%d&min_id=%s", limit, prevMinID),
}
diff --git a/internal/processing/user/emailconfirm.go b/internal/processing/user/emailconfirm.go
index 1f9cb0a10..3e19c61d4 100644
--- a/internal/processing/user/emailconfirm.go
+++ b/internal/processing/user/emailconfirm.go
@@ -25,6 +25,8 @@ import (
"time"
"github.com/google/uuid"
+ "github.com/spf13/viper"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
@@ -51,11 +53,12 @@ func (p *processor) SendConfirmEmail(ctx context.Context, user *gtsmodel.User, u
// equivalent to the odds of creating a few tens of trillions of UUIDs in a
// year and having one duplicate.
confirmationToken := uuid.NewString()
- confirmationLink := util.GenerateURIForEmailConfirm(p.config.Protocol, p.config.Host, confirmationToken)
+ confirmationLink := util.GenerateURIForEmailConfirm(confirmationToken)
// pull our instance entry from the database so we can greet the user nicely in the email
	instance := &gtsmodel.Instance{}
- if err := p.db.GetWhere(ctx, []db.Where{{Key: "domain", Value: p.config.Host}}, instance); err != nil {
+ host := viper.GetString(config.Keys.Host)
+ if err := p.db.GetWhere(ctx, []db.Where{{Key: "domain", Value: host}}, instance); err != nil {
return fmt.Errorf("SendConfirmEmail: error getting instance: %s", err)
}
diff --git a/internal/processing/user/user.go b/internal/processing/user/user.go
index 73cdb4901..2ddf12d7b 100644
--- a/internal/processing/user/user.go
+++ b/internal/processing/user/user.go
@@ -21,7 +21,6 @@ package user
import (
"context"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
@@ -40,15 +39,13 @@ type Processor interface {
}
type processor struct {
- config *config.Config
emailSender email.Sender
db db.DB
}
// New returns a new user processor
-func New(db db.DB, emailSender email.Sender, config *config.Config) Processor {
+func New(db db.DB, emailSender email.Sender) Processor {
return &processor{
- config: config,
emailSender: emailSender,
db: db,
}
diff --git a/internal/processing/user/user_test.go b/internal/processing/user/user_test.go
index 5c3cd7597..f9514d720 100644
--- a/internal/processing/user/user_test.go
+++ b/internal/processing/user/user_test.go
@@ -20,7 +20,6 @@ package user_test
import (
"github.com/stretchr/testify/suite"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
@@ -30,7 +29,6 @@ import (
type UserStandardTestSuite struct {
suite.Suite
- config *config.Config
emailSender email.Sender
db db.DB
@@ -43,13 +41,14 @@ type UserStandardTestSuite struct {
func (suite *UserStandardTestSuite) SetupTest() {
testrig.InitTestLog()
- suite.config = testrig.NewTestConfig()
+ testrig.InitTestConfig()
+
suite.db = testrig.NewTestDB()
suite.sentEmails = make(map[string]string)
suite.emailSender = testrig.NewEmailSender("../../../web/template/", suite.sentEmails)
suite.testUsers = testrig.NewTestUsers()
- suite.user = user.New(suite.db, suite.emailSender, suite.config)
+ suite.user = user.New(suite.db, suite.emailSender)
testrig.StandardDBSetup(suite.db, nil)
}
diff --git a/internal/router/cors.go b/internal/router/cors.go
index 9f8d379dd..e2ce9ce87 100644
--- a/internal/router/cors.go
+++ b/internal/router/cors.go
@@ -23,7 +23,6 @@ import (
"github.com/gin-contrib/cors"
"github.com/gin-gonic/gin"
- "github.com/superseriousbusiness/gotosocial/internal/config"
)
var corsConfig = cors.Config{
@@ -81,7 +80,7 @@ var corsConfig = cors.Config{
}
// useCors attaches the corsConfig above to the given gin engine
-func useCors(cfg *config.Config, engine *gin.Engine) error {
+func useCors(engine *gin.Engine) error {
c := cors.New(corsConfig)
engine.Use(c)
return nil
diff --git a/internal/router/router.go b/internal/router/router.go
index aef5c32e4..aa588906f 100644
--- a/internal/router/router.go
+++ b/internal/router/router.go
@@ -26,6 +26,7 @@ import (
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
"github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"golang.org/x/crypto/acme/autocert"
@@ -58,7 +59,6 @@ type Router interface {
type router struct {
engine *gin.Engine
srv *http.Server
- config *config.Config
certManager *autocert.Manager
}
@@ -69,10 +69,16 @@ func (r *router) AttachStaticFS(relativePath string, fs http.FileSystem) {
// Start starts the router nicely. It will serve two handlers if letsencrypt is enabled, and only the web/API handler if letsencrypt is not enabled.
func (r *router) Start() {
- if r.config.LetsEncryptConfig.Enabled {
+ keys := config.Keys
+ leEnabled := viper.GetBool(keys.LetsEncryptEnabled)
+
+ if leEnabled {
+ bindAddress := viper.GetString(keys.BindAddress)
+ lePort := viper.GetInt(keys.LetsEncryptPort)
+
// serve the http handler on the selected letsencrypt port, for receiving letsencrypt requests and solving their devious riddles
go func() {
- listen := fmt.Sprintf("%s:%d", r.config.BindAddress, r.config.LetsEncryptConfig.Port)
+ listen := fmt.Sprintf("%s:%d", bindAddress, lePort)
if err := http.ListenAndServe(listen, r.certManager.HTTPHandler(http.HandlerFunc(httpsRedirect))); err != nil && err != http.ErrServerClosed {
logrus.Fatalf("listen: %s", err)
}
@@ -103,7 +109,9 @@ func (r *router) Stop(ctx context.Context) error {
//
// The given DB is only used in the New function for parsing config values, and is not otherwise
// pinned to the router.
-func New(ctx context.Context, cfg *config.Config, db db.DB) (Router, error) {
+func New(ctx context.Context, db db.DB) (Router, error) {
+ keys := config.Keys
+
gin.SetMode(gin.ReleaseMode)
// create the actual engine here -- this is the core request routing handler for gts
@@ -116,12 +124,13 @@ func New(ctx context.Context, cfg *config.Config, db db.DB) (Router, error) {
engine.MaxMultipartMemory = 8 << 20
// set up IP forwarding via x-forward-* headers.
- if err := engine.SetTrustedProxies(cfg.TrustedProxies); err != nil {
+ trustedProxies := viper.GetStringSlice(keys.TrustedProxies)
+ if err := engine.SetTrustedProxies(trustedProxies); err != nil {
return nil, err
}
// enable cors on the engine
- if err := useCors(cfg, engine); err != nil {
+ if err := useCors(engine); err != nil {
return nil, err
}
@@ -129,17 +138,19 @@ func New(ctx context.Context, cfg *config.Config, db db.DB) (Router, error) {
loadTemplateFunctions(engine)
// load templates onto the engine
- if err := loadTemplates(cfg, engine); err != nil {
+ if err := loadTemplates(engine); err != nil {
return nil, err
}
// enable session store middleware on the engine
- if err := useSession(ctx, cfg, db, engine); err != nil {
+ if err := useSession(ctx, db, engine); err != nil {
return nil, err
}
// create the http server here, passing the gin engine as handler
- listen := fmt.Sprintf("%s:%d", cfg.BindAddress, cfg.Port)
+ bindAddress := viper.GetString(keys.BindAddress)
+ port := viper.GetInt(keys.Port)
+ listen := fmt.Sprintf("%s:%d", bindAddress, port)
s := &http.Server{
Addr: listen,
Handler: engine,
@@ -151,15 +162,19 @@ func New(ctx context.Context, cfg *config.Config, db db.DB) (Router, error) {
// We need to spawn the underlying server slightly differently depending on whether lets encrypt is enabled or not.
// In either case, the gin engine will still be used for routing requests.
+ leEnabled := viper.GetBool(keys.LetsEncryptEnabled)
var m *autocert.Manager
- if cfg.LetsEncryptConfig.Enabled {
+ if leEnabled {
// le IS enabled, so roll up an autocert manager for handling letsencrypt requests
+ host := viper.GetString(keys.Host)
+ leCertDir := viper.GetString(keys.LetsEncryptCertDir)
+ leEmailAddress := viper.GetString(keys.LetsEncryptEmailAddress)
m = &autocert.Manager{
Prompt: autocert.AcceptTOS,
- HostPolicy: autocert.HostWhitelist(cfg.Host),
- Cache: autocert.DirCache(cfg.LetsEncryptConfig.CertDir),
- Email: cfg.LetsEncryptConfig.EmailAddress,
+ HostPolicy: autocert.HostWhitelist(host),
+ Cache: autocert.DirCache(leCertDir),
+ Email: leEmailAddress,
}
s.TLSConfig = m.TLSConfig()
}
@@ -167,7 +182,6 @@ func New(ctx context.Context, cfg *config.Config, db db.DB) (Router, error) {
return &router{
engine: engine,
srv: s,
- config: cfg,
certManager: m,
}, nil
}
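
Condensed, the viper-driven server construction above looks roughly like the sketch below. Only the keys referenced in the hunks are used; the helper name `newServer` is an assumption for the sketch, not the patch's own API.

```go
package router

import (
	"fmt"
	"net/http"

	"github.com/spf13/viper"
	"golang.org/x/crypto/acme/autocert"

	"github.com/superseriousbusiness/gotosocial/internal/config"
)

// newServer shows how the listen address and the optional letsencrypt
// autocert manager are derived from viper values instead of a *config.Config.
func newServer(handler http.Handler) (*http.Server, *autocert.Manager) {
	keys := config.Keys

	listen := fmt.Sprintf("%s:%d", viper.GetString(keys.BindAddress), viper.GetInt(keys.Port))
	s := &http.Server{
		Addr:    listen,
		Handler: handler,
	}

	var m *autocert.Manager
	if viper.GetBool(keys.LetsEncryptEnabled) {
		// letsencrypt is enabled: cert handling goes through autocert,
		// constrained to the configured host.
		m = &autocert.Manager{
			Prompt:     autocert.AcceptTOS,
			HostPolicy: autocert.HostWhitelist(viper.GetString(keys.Host)),
			Cache:      autocert.DirCache(viper.GetString(keys.LetsEncryptCertDir)),
			Email:      viper.GetString(keys.LetsEncryptEmailAddress),
		}
		s.TLSConfig = m.TLSConfig()
	}

	return s, m
}
```
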
diff --git a/internal/router/session.go b/internal/router/session.go
index 3276c38aa..1f7b8bcfa 100644
--- a/internal/router/session.go
+++ b/internal/router/session.go
@@ -28,15 +28,16 @@ import (
"github.com/gin-contrib/sessions"
"github.com/gin-contrib/sessions/memstore"
"github.com/gin-gonic/gin"
+ "github.com/spf13/viper"
"github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
)
// sessionOptions returns the standard set of options to use for each session.
-func sessionOptions(cfg *config.Config) sessions.Options {
+func sessionOptions() sessions.Options {
return sessions.Options{
Path: "/",
- Domain: cfg.Host,
+ Domain: viper.GetString(config.Keys.Host),
MaxAge: 120, // 2 minutes
Secure: true, // only use cookie over https
HttpOnly: true, // exclude javascript from inspecting cookie
@@ -44,9 +45,12 @@ func sessionOptions(cfg *config.Config) sessions.Options {
}
}
-func sessionName(cfg *config.Config) (string, error) {
+// SessionName is a utility function that derives an appropriate session name from the hostname.
+func SessionName() (string, error) {
// parse the protocol + host
- u, err := url.Parse(fmt.Sprintf("%s://%s", cfg.Protocol, cfg.Host))
+ protocol := viper.GetString(config.Keys.Protocol)
+ host := viper.GetString(config.Keys.Host)
+ u, err := url.Parse(fmt.Sprintf("%s://%s", protocol, host))
if err != nil {
return "", err
}
@@ -54,13 +58,13 @@ func sessionName(cfg *config.Config) (string, error) {
// take the hostname without any port attached
strippedHostname := u.Hostname()
if strippedHostname == "" {
- return "", fmt.Errorf("could not derive hostname without port from %s://%s", cfg.Protocol, cfg.Host)
+ return "", fmt.Errorf("could not derive hostname without port from %s://%s", protocol, host)
}
return fmt.Sprintf("gotosocial-%s", strippedHostname), nil
}
-func useSession(ctx context.Context, cfg *config.Config, sessionDB db.Session, engine *gin.Engine) error {
+func useSession(ctx context.Context, sessionDB db.Session, engine *gin.Engine) error {
// check if we have a saved router session already
rs, err := sessionDB.GetSession(ctx)
if err != nil {
@@ -71,9 +75,9 @@ func useSession(ctx context.Context, cfg *config.Config, sessionDB db.Session, e
}
store := memstore.NewStore(rs.Auth, rs.Crypt)
- store.Options(sessionOptions(cfg))
+ store.Options(sessionOptions())
- sessionName, err := sessionName(cfg)
+ sessionName, err := SessionName()
if err != nil {
return err
}
diff --git a/internal/router/session_test.go b/internal/router/session_test.go
index 7c2d324fd..31beec1ae 100644
--- a/internal/router/session_test.go
+++ b/internal/router/session_test.go
@@ -16,68 +16,68 @@
along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
-package router
+package router_test
import (
"testing"
+ "github.com/spf13/viper"
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/config"
+ "github.com/superseriousbusiness/gotosocial/internal/router"
+ "github.com/superseriousbusiness/gotosocial/testrig"
)
type SessionTestSuite struct {
suite.Suite
}
-func (suite *SessionTestSuite) TestDeriveSessionNameLocalhostWithPort() {
- cfg := &config.Config{
- Protocol: "http",
- Host: "localhost:8080",
- }
+func (suite *SessionTestSuite) SetupTest() {
+ testrig.InitTestConfig()
+}
- sessionName, err := sessionName(cfg)
+func (suite *SessionTestSuite) TestDeriveSessionNameLocalhostWithPort() {
+ viper.Set(config.Keys.Protocol, "http")
+ viper.Set(config.Keys.Host, "localhost:8080")
+
+ sessionName, err := router.SessionName()
suite.NoError(err)
suite.Equal("gotosocial-localhost", sessionName)
}
func (suite *SessionTestSuite) TestDeriveSessionNameLocalhost() {
- cfg := &config.Config{
- Protocol: "http",
- Host: "localhost",
- }
+ viper.Set(config.Keys.Protocol, "http")
+ viper.Set(config.Keys.Host, "localhost")
- sessionName, err := sessionName(cfg)
+ sessionName, err := router.SessionName()
suite.NoError(err)
suite.Equal("gotosocial-localhost", sessionName)
}
func (suite *SessionTestSuite) TestDeriveSessionNoProtocol() {
- cfg := &config.Config{
- Host: "localhost",
- }
+ viper.Set(config.Keys.Protocol, "")
+ viper.Set(config.Keys.Host, "localhost")
- sessionName, err := sessionName(cfg)
+ sessionName, err := router.SessionName()
suite.EqualError(err, "parse \"://localhost\": missing protocol scheme")
suite.Equal("", sessionName)
}
func (suite *SessionTestSuite) TestDeriveSessionNoHost() {
- cfg := &config.Config{
- Protocol: "https",
- }
+ viper.Set(config.Keys.Protocol, "https")
+ viper.Set(config.Keys.Host, "")
+ viper.Set(config.Keys.Port, 0)
- sessionName, err := sessionName(cfg)
+ sessionName, err := router.SessionName()
suite.EqualError(err, "could not derive hostname without port from https://")
suite.Equal("", sessionName)
}
func (suite *SessionTestSuite) TestDeriveSessionOK() {
- cfg := &config.Config{
- Protocol: "https",
- Host: "example.org",
- }
+ viper.Set(config.Keys.Protocol, "https")
+ viper.Set(config.Keys.Host, "example.org")
- sessionName, err := sessionName(cfg)
+ sessionName, err := router.SessionName()
suite.NoError(err)
suite.Equal("gotosocial-example.org", sessionName)
}
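
The tests above establish the pattern for exercising viper-backed code: initialise test defaults once with testrig.InitTestConfig, then override individual keys with viper.Set. As a runnable example mirroring the TestDeriveSessionOK case:

```go
package router_test

import (
	"fmt"

	"github.com/spf13/viper"

	"github.com/superseriousbusiness/gotosocial/internal/config"
	"github.com/superseriousbusiness/gotosocial/internal/router"
	"github.com/superseriousbusiness/gotosocial/testrig"
)

func ExampleSessionName() {
	// load test defaults, then override just the keys under test
	testrig.InitTestConfig()
	viper.Set(config.Keys.Protocol, "https")
	viper.Set(config.Keys.Host, "example.org")

	name, err := router.SessionName()
	if err != nil {
		panic(err)
	}
	fmt.Println(name)
	// Output: gotosocial-example.org
}
```
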
diff --git a/internal/router/template.go b/internal/router/template.go
index bf5682628..b0d998208 100644
--- a/internal/router/template.go
+++ b/internal/router/template.go
@@ -26,18 +26,20 @@ import (
"time"
"github.com/gin-gonic/gin"
+ "github.com/spf13/viper"
"github.com/superseriousbusiness/gotosocial/internal/api/model"
"github.com/superseriousbusiness/gotosocial/internal/config"
)
// loadTemplates loads html templates for use by the given engine
-func loadTemplates(cfg *config.Config, engine *gin.Engine) error {
+func loadTemplates(engine *gin.Engine) error {
cwd, err := os.Getwd()
if err != nil {
return fmt.Errorf("error getting current working directory: %s", err)
}
- tmPath := filepath.Join(cwd, fmt.Sprintf("%s*", cfg.TemplateConfig.BaseDir))
+ templateBaseDir := viper.GetString(config.Keys.WebTemplateBaseDir)
+ tmPath := filepath.Join(cwd, fmt.Sprintf("%s*", templateBaseDir))
engine.LoadHTMLGlob(tmPath)
return nil
diff --git a/internal/text/formatter.go b/internal/text/formatter.go
index b0988e9f9..f7c908a7d 100644
--- a/internal/text/formatter.go
+++ b/internal/text/formatter.go
@@ -21,7 +21,6 @@ package text
import (
"context"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
)
@@ -42,14 +41,12 @@ type Formatter interface {
}
type formatter struct {
- cfg *config.Config
- db db.DB
+ db db.DB
}
// NewFormatter returns a new Formatter interface for parsing statuses and other text input into nice html.
-func NewFormatter(cfg *config.Config, db db.DB) Formatter {
+func NewFormatter(db db.DB) Formatter {
return &formatter{
- cfg: cfg,
- db: db,
+ db: db,
}
}
diff --git a/internal/text/formatter_test.go b/internal/text/formatter_test.go
index 8b4d176e2..096d32789 100644
--- a/internal/text/formatter_test.go
+++ b/internal/text/formatter_test.go
@@ -20,7 +20,6 @@ package text_test
import (
"github.com/stretchr/testify/suite"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/text"
@@ -30,8 +29,7 @@ import (
type TextStandardTestSuite struct {
// standard suite interfaces
suite.Suite
- config *config.Config
- db db.DB
+ db db.DB
// standard suite models
testTokens map[string]*gtsmodel.Token
@@ -61,9 +59,11 @@ func (suite *TextStandardTestSuite) SetupSuite() {
}
func (suite *TextStandardTestSuite) SetupTest() {
- suite.config = testrig.NewTestConfig()
+ testrig.InitTestLog()
+ testrig.InitTestConfig()
+
suite.db = testrig.NewTestDB()
- suite.formatter = text.NewFormatter(suite.config, suite.db)
+ suite.formatter = text.NewFormatter(suite.db)
testrig.StandardDBSetup(suite.db, nil)
}
diff --git a/internal/timeline/get_test.go b/internal/timeline/get_test.go
index b05585cab..cebb9033f 100644
--- a/internal/timeline/get_test.go
+++ b/internal/timeline/get_test.go
@@ -38,9 +38,10 @@ func (suite *GetTestSuite) SetupSuite() {
}
func (suite *GetTestSuite) SetupTest() {
- suite.config = testrig.NewTestConfig()
- suite.db = testrig.NewTestDB()
testrig.InitTestLog()
+ testrig.InitTestConfig()
+
+ suite.db = testrig.NewTestDB()
suite.tc = testrig.NewTestTypeConverter(suite.db)
testrig.StandardDBSetup(suite.db, nil)
diff --git a/internal/timeline/index_test.go b/internal/timeline/index_test.go
index 0f3a1df4a..a326f223e 100644
--- a/internal/timeline/index_test.go
+++ b/internal/timeline/index_test.go
@@ -39,9 +39,10 @@ func (suite *IndexTestSuite) SetupSuite() {
}
func (suite *IndexTestSuite) SetupTest() {
- suite.config = testrig.NewTestConfig()
- suite.db = testrig.NewTestDB()
testrig.InitTestLog()
+ testrig.InitTestConfig()
+
+ suite.db = testrig.NewTestDB()
suite.tc = testrig.NewTestTypeConverter(suite.db)
testrig.StandardDBSetup(suite.db, nil)
diff --git a/internal/timeline/manager.go b/internal/timeline/manager.go
index 5d41d72a9..c5b10921b 100644
--- a/internal/timeline/manager.go
+++ b/internal/timeline/manager.go
@@ -26,7 +26,6 @@ import (
"github.com/sirupsen/logrus"
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/typeutils"
@@ -84,12 +83,11 @@ type Manager interface {
}
// NewManager returns a new timeline manager with the given database, typeconverter, config, and log.
-func NewManager(db db.DB, tc typeutils.TypeConverter, config *config.Config) Manager {
+func NewManager(db db.DB, tc typeutils.TypeConverter) Manager {
return &manager{
accountTimelines: sync.Map{},
db: db,
tc: tc,
- config: config,
}
}
@@ -97,7 +95,6 @@ type manager struct {
accountTimelines sync.Map
db db.DB
tc typeutils.TypeConverter
- config *config.Config
}
func (m *manager) Ingest(ctx context.Context, status *gtsmodel.Status, timelineAccountID string) (bool, error) {
diff --git a/internal/timeline/manager_test.go b/internal/timeline/manager_test.go
index 4d725e48d..f5d62fe42 100644
--- a/internal/timeline/manager_test.go
+++ b/internal/timeline/manager_test.go
@@ -36,9 +36,10 @@ func (suite *ManagerTestSuite) SetupSuite() {
}
func (suite *ManagerTestSuite) SetupTest() {
- suite.config = testrig.NewTestConfig()
- suite.db = testrig.NewTestDB()
testrig.InitTestLog()
+ testrig.InitTestConfig()
+
+ suite.db = testrig.NewTestDB()
suite.tc = testrig.NewTestTypeConverter(suite.db)
testrig.StandardDBSetup(suite.db, nil)
diff --git a/internal/timeline/timeline_test.go b/internal/timeline/timeline_test.go
index 517575f3e..8ef407711 100644
--- a/internal/timeline/timeline_test.go
+++ b/internal/timeline/timeline_test.go
@@ -20,7 +20,6 @@ package timeline_test
import (
"github.com/stretchr/testify/suite"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/timeline"
@@ -29,9 +28,8 @@ import (
type TimelineStandardTestSuite struct {
suite.Suite
- config *config.Config
- db db.DB
- tc typeutils.TypeConverter
+ db db.DB
+ tc typeutils.TypeConverter
testAccounts map[string]*gtsmodel.Account
testStatuses map[string]*gtsmodel.Status
diff --git a/internal/trans/trans_test.go b/internal/trans/trans_test.go
index 6554167b0..a6669f397 100644
--- a/internal/trans/trans_test.go
+++ b/internal/trans/trans_test.go
@@ -30,8 +30,9 @@ type TransTestSuite struct {
}
func (suite *TransTestSuite) SetupTest() {
- suite.db = testrig.NewTestDB()
testrig.InitTestLog()
+ testrig.InitTestConfig()
+ suite.db = testrig.NewTestDB()
testrig.StandardDBSetup(suite.db, nil)
}
diff --git a/internal/transport/controller.go b/internal/transport/controller.go
index 86f612c15..1f46494dc 100644
--- a/internal/transport/controller.go
+++ b/internal/transport/controller.go
@@ -25,6 +25,7 @@ import (
"sync"
"github.com/go-fed/httpsig"
+ "github.com/spf13/viper"
"github.com/superseriousbusiness/activity/pub"
"github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
@@ -37,7 +38,6 @@ type Controller interface {
}
type controller struct {
- config *config.Config
db db.DB
clock pub.Clock
client pub.HttpClient
@@ -45,13 +45,16 @@ type controller struct {
}
// NewController returns an implementation of the Controller interface for creating new transports
-func NewController(config *config.Config, db db.DB, clock pub.Clock, client pub.HttpClient) Controller {
+func NewController(db db.DB, clock pub.Clock, client pub.HttpClient) Controller {
+ applicationName := viper.GetString(config.Keys.ApplicationName)
+ host := viper.GetString(config.Keys.Host)
+ appAgent := fmt.Sprintf("%s %s", applicationName, host)
+
return &controller{
- config: config,
db: db,
clock: clock,
client: client,
- appAgent: fmt.Sprintf("%s %s", config.ApplicationName, config.Host),
+ appAgent: appAgent,
}
}
@@ -93,7 +96,7 @@ func (c *controller) NewTransportForUsername(ctx context.Context, username strin
// Otherwise, we can take the instance account and use those credentials to make the request.
var u string
if username == "" {
- u = c.config.Host
+ u = viper.GetString(config.Keys.Host)
} else {
u = username
}
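
The transport user-agent is now assembled from two viper values rather than the removed config struct. A small self-contained sketch (the example value in the comment assumes application-name is set to "gotosocial"; the helper name is illustrative):

```go
package transport

import (
	"fmt"

	"github.com/spf13/viper"

	"github.com/superseriousbusiness/gotosocial/internal/config"
)

// userAgent assembles the transport user-agent from viper config,
// e.g. "gotosocial example.org" if application-name is "gotosocial".
func userAgent() string {
	applicationName := viper.GetString(config.Keys.ApplicationName)
	host := viper.GetString(config.Keys.Host)
	return fmt.Sprintf("%s %s", applicationName, host)
}
```
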
diff --git a/internal/typeutils/converter.go b/internal/typeutils/converter.go
index 212ae1247..448fef5c8 100644
--- a/internal/typeutils/converter.go
+++ b/internal/typeutils/converter.go
@@ -26,7 +26,6 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/ap"
"github.com/superseriousbusiness/gotosocial/internal/api/model"
"github.com/superseriousbusiness/gotosocial/internal/cache"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
)
@@ -192,15 +191,13 @@ type TypeConverter interface {
}
type converter struct {
- config *config.Config
db db.DB
asCache cache.Cache
}
// NewConverter returns a new Converter
-func NewConverter(config *config.Config, db db.DB) TypeConverter {
+func NewConverter(db db.DB) TypeConverter {
return &converter{
- config: config,
db: db,
asCache: cache.New(),
}
diff --git a/internal/typeutils/converter_test.go b/internal/typeutils/converter_test.go
index d557e52f2..57952a84a 100644
--- a/internal/typeutils/converter_test.go
+++ b/internal/typeutils/converter_test.go
@@ -21,7 +21,6 @@ package typeutils_test
import (
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/activity/streams/vocab"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/typeutils"
@@ -324,7 +323,6 @@ const (
type TypeUtilsTestSuite struct {
suite.Suite
- config *config.Config
db db.DB
testAccounts map[string]*gtsmodel.Account
testStatuses map[string]*gtsmodel.Status
@@ -334,13 +332,14 @@ type TypeUtilsTestSuite struct {
}
func (suite *TypeUtilsTestSuite) SetupSuite() {
- suite.config = testrig.NewTestConfig()
- suite.db = testrig.NewTestDB()
testrig.InitTestLog()
+ testrig.InitTestConfig()
+
+ suite.db = testrig.NewTestDB()
suite.testAccounts = testrig.NewTestAccounts()
suite.testStatuses = testrig.NewTestStatuses()
suite.testPeople = testrig.NewTestFediPeople()
- suite.typeconverter = typeutils.NewConverter(suite.config, suite.db)
+ suite.typeconverter = typeutils.NewConverter(suite.db)
}
func (suite *TypeUtilsTestSuite) SetupTest() {
diff --git a/internal/typeutils/internal.go b/internal/typeutils/internal.go
index b6a425732..2257d7203 100644
--- a/internal/typeutils/internal.go
+++ b/internal/typeutils/internal.go
@@ -25,7 +25,7 @@ func (c *converter) FollowRequestToFollow(ctx context.Context, f *gtsmodel.Follo
func (c *converter) StatusToBoost(ctx context.Context, s *gtsmodel.Status, boostingAccount *gtsmodel.Account) (*gtsmodel.Status, error) {
// the wrapper won't use the same ID as the boosted status so we generate some new UUIDs
- uris := util.GenerateURIsForAccount(boostingAccount.Username, c.config.Protocol, c.config.Host)
+ uris := util.GenerateURIsForAccount(boostingAccount.Username)
boostWrapperStatusID, err := id.NewULID()
if err != nil {
return nil, err
diff --git a/internal/typeutils/internaltoas.go b/internal/typeutils/internaltoas.go
index ae8e3b1f2..1bf2416b8 100644
--- a/internal/typeutils/internaltoas.go
+++ b/internal/typeutils/internaltoas.go
@@ -25,9 +25,11 @@ import (
"fmt"
"net/url"
+ "github.com/spf13/viper"
"github.com/superseriousbusiness/activity/pub"
"github.com/superseriousbusiness/activity/streams"
"github.com/superseriousbusiness/activity/streams/vocab"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
)
@@ -620,7 +622,8 @@ func (c *converter) MentionToAS(ctx context.Context, m *gtsmodel.Mention) (vocab
// name -- this should be the namestring of the mentioned user, something like @whatever@example.org
var domain string
if m.TargetAccount.Domain == "" {
- domain = c.config.AccountDomain
+ accountDomain := viper.GetString(config.Keys.AccountDomain)
+ domain = accountDomain
} else {
domain = m.TargetAccount.Domain
}
diff --git a/internal/typeutils/internaltofrontend.go b/internal/typeutils/internaltofrontend.go
index 272ffc175..4dd8e335b 100644
--- a/internal/typeutils/internaltofrontend.go
+++ b/internal/typeutils/internaltofrontend.go
@@ -21,11 +21,14 @@ package typeutils
import (
"context"
"fmt"
- "github.com/sirupsen/logrus"
"strings"
"time"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
+
"github.com/superseriousbusiness/gotosocial/internal/api/model"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
)
@@ -567,34 +570,32 @@ func (c *converter) InstanceToAPIInstance(ctx context.Context, i *gtsmodel.Insta
}
// if the requested instance is *this* instance, we can add some extra information
- if i.Domain == c.config.Host {
- userCountKey := "user_count"
- statusCountKey := "status_count"
- domainCountKey := "domain_count"
-
- userCount, err := c.db.CountInstanceUsers(ctx, c.config.Host)
+ keys := config.Keys
+ host := viper.GetString(keys.Host)
+ if i.Domain == host {
+ userCount, err := c.db.CountInstanceUsers(ctx, host)
if err == nil {
- mi.Stats[userCountKey] = userCount
+ mi.Stats["user_count"] = userCount
}
- statusCount, err := c.db.CountInstanceStatuses(ctx, c.config.Host)
+ statusCount, err := c.db.CountInstanceStatuses(ctx, host)
if err == nil {
- mi.Stats[statusCountKey] = statusCount
+ mi.Stats["status_count"] = statusCount
}
- domainCount, err := c.db.CountInstanceDomains(ctx, c.config.Host)
+ domainCount, err := c.db.CountInstanceDomains(ctx, host)
if err == nil {
- mi.Stats[domainCountKey] = domainCount
+ mi.Stats["domain_count"] = domainCount
}
- mi.Registrations = c.config.AccountsConfig.OpenRegistration
- mi.ApprovalRequired = c.config.AccountsConfig.RequireApproval
+ mi.Registrations = viper.GetBool(keys.AccountsRegistrationOpen)
+ mi.ApprovalRequired = viper.GetBool(keys.AccountsApprovalRequired)
mi.InvitesEnabled = false // TODO
- mi.MaxTootChars = uint(c.config.StatusesConfig.MaxChars)
+ mi.MaxTootChars = uint(viper.GetInt(keys.StatusesMaxChars))
mi.URLS = &model.InstanceURLs{
- StreamingAPI: fmt.Sprintf("wss://%s", c.config.Host),
+ StreamingAPI: fmt.Sprintf("wss://%s", host),
}
- mi.Version = c.config.SoftwareVersion
+ mi.Version = viper.GetString(keys.SoftwareVersion)
}
// get the instance account if it exists and just skip if it doesn't
diff --git a/internal/typeutils/wrap.go b/internal/typeutils/wrap.go
index b5938c419..0f82a679b 100644
--- a/internal/typeutils/wrap.go
+++ b/internal/typeutils/wrap.go
@@ -33,7 +33,7 @@ func (c *converter) WrapPersonInUpdate(person vocab.ActivityStreamsPerson, origi
return nil, err
}
- idString := util.GenerateURIForUpdate(originAccount.Username, c.config.Protocol, c.config.Host, newID)
+ idString := util.GenerateURIForUpdate(originAccount.Username, newID)
idURI, err := url.Parse(idString)
if err != nil {
return nil, fmt.Errorf("WrapPersonInUpdate: error parsing url %s: %s", idString, err)
diff --git a/internal/util/uri.go b/internal/util/uri.go
index d1ae1de41..b9ef01799 100644
--- a/internal/util/uri.go
+++ b/internal/util/uri.go
@@ -22,6 +22,8 @@ import (
"fmt"
"net/url"
+ "github.com/spf13/viper"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/regexes"
)
@@ -116,36 +118,49 @@ type UserURIs struct {
// GenerateURIForFollow returns the AP URI for a new follow -- something like:
// https://example.org/users/whatever_user/follow/01F7XTH1QGBAPMGF49WJZ91XGC
-func GenerateURIForFollow(username string, protocol string, host string, thisFollowID string) string {
+func GenerateURIForFollow(username string, thisFollowID string) string {
+ protocol := viper.GetString(config.Keys.Protocol)
+ host := viper.GetString(config.Keys.Host)
return fmt.Sprintf("%s://%s/%s/%s/%s/%s", protocol, host, UsersPath, username, FollowPath, thisFollowID)
}
// GenerateURIForLike returns the AP URI for a new like/fave -- something like:
// https://example.org/users/whatever_user/liked/01F7XTH1QGBAPMGF49WJZ91XGC
-func GenerateURIForLike(username string, protocol string, host string, thisFavedID string) string {
+func GenerateURIForLike(username string, thisFavedID string) string {
+ protocol := viper.GetString(config.Keys.Protocol)
+ host := viper.GetString(config.Keys.Host)
return fmt.Sprintf("%s://%s/%s/%s/%s/%s", protocol, host, UsersPath, username, LikedPath, thisFavedID)
}
// GenerateURIForUpdate returns the AP URI for a new update activity -- something like:
// https://example.org/users/whatever_user#updates/01F7XTH1QGBAPMGF49WJZ91XGC
-func GenerateURIForUpdate(username string, protocol string, host string, thisUpdateID string) string {
+func GenerateURIForUpdate(username string, thisUpdateID string) string {
+ protocol := viper.GetString(config.Keys.Protocol)
+ host := viper.GetString(config.Keys.Host)
return fmt.Sprintf("%s://%s/%s/%s#%s/%s", protocol, host, UsersPath, username, UpdatePath, thisUpdateID)
}
// GenerateURIForBlock returns the AP URI for a new block activity -- something like:
// https://example.org/users/whatever_user/blocks/01F7XTH1QGBAPMGF49WJZ91XGC
-func GenerateURIForBlock(username string, protocol string, host string, thisBlockID string) string {
+func GenerateURIForBlock(username string, thisBlockID string) string {
+ protocol := viper.GetString(config.Keys.Protocol)
+ host := viper.GetString(config.Keys.Host)
return fmt.Sprintf("%s://%s/%s/%s/%s/%s", protocol, host, UsersPath, username, BlocksPath, thisBlockID)
}
// GenerateURIForEmailConfirm returns a link for email confirmation -- something like:
// https://example.org/confirm_email?token=490e337c-0162-454f-ac48-4b22bb92a205
-func GenerateURIForEmailConfirm(protocol string, host string, token string) string {
+func GenerateURIForEmailConfirm(token string) string {
+ protocol := viper.GetString(config.Keys.Protocol)
+ host := viper.GetString(config.Keys.Host)
return fmt.Sprintf("%s://%s/%s?token=%s", protocol, host, ConfirmEmailPath, token)
}
// GenerateURIsForAccount throws together a bunch of URIs for the given username, with the given protocol and host.
-func GenerateURIsForAccount(username string, protocol string, host string) *UserURIs {
+func GenerateURIsForAccount(username string) *UserURIs {
+ protocol := viper.GetString(config.Keys.Protocol)
+ host := viper.GetString(config.Keys.Host)
+
// The below URLs are used for serving web requests
hostURL := fmt.Sprintf("%s://%s", protocol, host)
userURL := fmt.Sprintf("%s/@%s", hostURL, username)
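
With protocol and host resolved inside the helpers, call sites shrink to the varying parts only. A small usage sketch whose expected output matches the example URIs in the doc comments above:

```go
package main

import (
	"fmt"

	"github.com/spf13/viper"

	"github.com/superseriousbusiness/gotosocial/internal/config"
	"github.com/superseriousbusiness/gotosocial/internal/util"
)

func main() {
	// protocol and host are read from viper inside the helpers now
	viper.Set(config.Keys.Protocol, "https")
	viper.Set(config.Keys.Host, "example.org")

	fmt.Println(util.GenerateURIForLike("whatever_user", "01F7XTH1QGBAPMGF49WJZ91XGC"))
	// https://example.org/users/whatever_user/liked/01F7XTH1QGBAPMGF49WJZ91XGC

	fmt.Println(util.GenerateURIForEmailConfirm("490e337c-0162-454f-ac48-4b22bb92a205"))
	// https://example.org/confirm_email?token=490e337c-0162-454f-ac48-4b22bb92a205
}
```
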
diff --git a/internal/visibility/filter_test.go b/internal/visibility/filter_test.go
index a140d48e2..12c1a72a6 100644
--- a/internal/visibility/filter_test.go
+++ b/internal/visibility/filter_test.go
@@ -20,7 +20,6 @@ package visibility_test
import (
"github.com/stretchr/testify/suite"
- "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/visibility"
@@ -30,8 +29,7 @@ import (
type FilterStandardTestSuite struct {
// standard suite interfaces
suite.Suite
- config *config.Config
- db db.DB
+ db db.DB
// standard suite models
testTokens map[string]*gtsmodel.Token
@@ -61,8 +59,8 @@ func (suite *FilterStandardTestSuite) SetupSuite() {
func (suite *FilterStandardTestSuite) SetupTest() {
testrig.InitTestLog()
+ testrig.InitTestConfig()
- suite.config = testrig.NewTestConfig()
suite.db = testrig.NewTestDB()
suite.filter = visibility.NewFilter(suite.db)
diff --git a/internal/web/base.go b/internal/web/base.go
index 5d19a3f70..9d99fddd1 100644
--- a/internal/web/base.go
+++ b/internal/web/base.go
@@ -26,6 +26,7 @@ import (
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
"github.com/superseriousbusiness/gotosocial/internal/api"
"github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/processing"
@@ -40,14 +41,12 @@ const (
// Module implements the api.ClientModule interface for web pages.
type Module struct {
- config *config.Config
processor processing.Processor
}
// New returns a new api.ClientModule for web pages.
-func New(config *config.Config, processor processing.Processor) api.ClientModule {
+func New(processor processing.Processor) api.ClientModule {
return &Module{
- config: config,
processor: processor,
}
}
@@ -56,7 +55,8 @@ func (m *Module) baseHandler(c *gin.Context) {
l := logrus.WithField("func", "BaseGETHandler")
l.Trace("serving index html")
- instance, err := m.processor.InstanceGet(c.Request.Context(), m.config.Host)
+ host := viper.GetString(config.Keys.Host)
+ instance, err := m.processor.InstanceGet(c.Request.Context(), host)
if err != nil {
l.Debugf("error getting instance from processor: %s", err)
c.JSON(http.StatusInternalServerError, gin.H{"error": "internal server error"})
@@ -73,7 +73,8 @@ func (m *Module) NotFoundHandler(c *gin.Context) {
l := logrus.WithField("func", "404")
l.Trace("serving 404 html")
- instance, err := m.processor.InstanceGet(c.Request.Context(), m.config.Host)
+ host := viper.GetString(config.Keys.Host)
+ instance, err := m.processor.InstanceGet(c.Request.Context(), host)
if err != nil {
l.Debugf("error getting instance from processor: %s", err)
c.JSON(http.StatusInternalServerError, gin.H{"error": "internal server error"})
@@ -87,17 +88,17 @@ func (m *Module) NotFoundHandler(c *gin.Context) {
// Route satisfies the RESTAPIModule interface
func (m *Module) Route(s router.Router) error {
-
// serve static files from /assets
cwd, err := os.Getwd()
if err != nil {
return fmt.Errorf("error getting current working directory: %s", err)
}
- assetPath := filepath.Join(cwd, m.config.TemplateConfig.AssetBaseDir)
+ assetBaseDir := viper.GetString(config.Keys.WebAssetBaseDir)
+ assetPath := filepath.Join(cwd, assetBaseDir)
s.AttachStaticFS("/assets", fileSystem{http.Dir(assetPath)})
// Admin panel route, if it exists
- adminPath := filepath.Join(cwd, m.config.TemplateConfig.AssetBaseDir, "/admin")
+ adminPath := filepath.Join(cwd, assetBaseDir, "/admin")
s.AttachStaticFS("/admin", fileSystem{http.Dir(adminPath)})
// serve front-page
diff --git a/internal/web/confirmemail.go b/internal/web/confirmemail.go
index 97ed597d3..6eaa940c6 100644
--- a/internal/web/confirmemail.go
+++ b/internal/web/confirmemail.go
@@ -23,6 +23,8 @@ import (
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
)
func (m *Module) confirmEmailGETHandler(c *gin.Context) {
@@ -43,7 +45,8 @@ func (m *Module) confirmEmailGETHandler(c *gin.Context) {
return
}
- instance, err := m.processor.InstanceGet(ctx, m.config.Host)
+ host := viper.GetString(config.Keys.Host)
+ instance, err := m.processor.InstanceGet(ctx, host)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
diff --git a/internal/web/thread.go b/internal/web/thread.go
index a359107cc..578722674 100644
--- a/internal/web/thread.go
+++ b/internal/web/thread.go
@@ -19,10 +19,13 @@
package web
import (
- "github.com/sirupsen/logrus"
"net/http"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
+
"github.com/gin-gonic/gin"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
)
@@ -51,7 +54,8 @@ func (m *Module) threadTemplateHandler(c *gin.Context) {
return
}
- instance, err := m.processor.InstanceGet(ctx, m.config.Host)
+ host := viper.GetString(config.Keys.Host)
+ instance, err := m.processor.InstanceGet(ctx, host)
if err != nil {
l.Debugf("error getting instance from processor: %s", err)
c.JSON(http.StatusInternalServerError, gin.H{"error": "internal server error"})
diff --git a/scripts/build.sh b/scripts/build.sh
index 565435a69..6c3f86778 100755
--- a/scripts/build.sh
+++ b/scripts/build.sh
@@ -2,7 +2,7 @@
set -eu
-COMMIT="${COMMIT:-12345678}"
+COMMIT="${COMMIT:-1234567}"
VERSION="${VERSION:-0.0.0}"
CGO_ENABLED=0 go build -trimpath \
diff --git a/test/cliparsing.sh b/test/cliparsing.sh
new file mode 100755
index 000000000..13e61ede3
--- /dev/null
+++ b/test/cliparsing.sh
@@ -0,0 +1,117 @@
+#!/bin/sh
+
+set -e
+
+echo "STARTING CLI TESTS"
+
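+# These tests run the `debug config` command under various combinations of built-in
+# defaults, config files, env vars, and cli flags, then compare the resulting JSON
+# dump against an expected string. The precedence exercised below is, highest first:
+# cli flag > env var > config file > built-in default.
+# NOTE: file paths are relative to the repository root, so run this script from there.
+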
+echo "TEST_1 Make sure defaults are set correctly."
+TEST_1_EXPECTED='{"account-domain":"","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"","db-address":"localhost","db-database":"postgres","db-password":"postgres","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"postgres","help":false,"host":"","letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","profile","email","groups"],"oidc-skip-verification":false,"port":8080,"protocol":"https","smtp-from":"GoToSocial","smtp-host":"","smtp-password":"","smtp-port":0,"smtp-username":"","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-base-path":"/gotosocial/storage","storage-serve-base-path":"/fileserver","storage-serve-host":"localhost","storage-serve-protocol":"https","trusted-proxies":["127.0.0.1/32"],"web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
+TEST_1="$(go run ./cmd/gotosocial/... debug config)"
+if [ "${TEST_1}" != "${TEST_1_EXPECTED}" ]; then
+ echo "TEST_1 not equal TEST_1_EXPECTED"
+ exit 1
+else
+ echo "TEST_1 OK"
+fi
+
+echo "TEST_2 Override db-address from default using cli flag."
+TEST_2_EXPECTED='{"account-domain":"","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"","db-address":"some.db.address","db-database":"postgres","db-password":"postgres","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"postgres","help":false,"host":"","letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","profile","email","groups"],"oidc-skip-verification":false,"port":8080,"protocol":"https","smtp-from":"GoToSocial","smtp-host":"","smtp-password":"","smtp-port":0,"smtp-username":"","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-base-path":"/gotosocial/storage","storage-serve-base-path":"/fileserver","storage-serve-host":"localhost","storage-serve-protocol":"https","trusted-proxies":["127.0.0.1/32"],"web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
+TEST_2="$(go run ./cmd/gotosocial/... --db-address some.db.address debug config)"
+if [ "${TEST_2}" != "${TEST_2_EXPECTED}" ]; then
+ echo "TEST_2 not equal TEST_2_EXPECTED"
+ exit 1
+else
+ echo "TEST_2 OK"
+fi
+
+echo "TEST_3 Override db-address from default using env var."
+TEST_3_EXPECTED='{"account-domain":"","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"","db-address":"some.db.address","db-database":"postgres","db-password":"postgres","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"postgres","help":false,"host":"","letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","profile","email","groups"],"oidc-skip-verification":false,"port":8080,"protocol":"https","smtp-from":"GoToSocial","smtp-host":"","smtp-password":"","smtp-port":0,"smtp-username":"","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-base-path":"/gotosocial/storage","storage-serve-base-path":"/fileserver","storage-serve-host":"localhost","storage-serve-protocol":"https","trusted-proxies":["127.0.0.1/32"],"web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
+TEST_3="$(GTS_DB_ADDRESS=some.db.address go run ./cmd/gotosocial/... debug config)"
+if [ "${TEST_3}" != "${TEST_3_EXPECTED}" ]; then
+ echo "TEST_3 not equal TEST_3_EXPECTED"
+ exit 1
+else
+ echo "TEST_3 OK"
+fi
+
+echo "TEST_4 Override db-address from default using both env var and cli flag. The cli flag should take priority."
+TEST_4_EXPECTED='{"account-domain":"","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"","db-address":"some.other.db.address","db-database":"postgres","db-password":"postgres","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"postgres","help":false,"host":"","letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","profile","email","groups"],"oidc-skip-verification":false,"port":8080,"protocol":"https","smtp-from":"GoToSocial","smtp-host":"","smtp-password":"","smtp-port":0,"smtp-username":"","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-base-path":"/gotosocial/storage","storage-serve-base-path":"/fileserver","storage-serve-host":"localhost","storage-serve-protocol":"https","trusted-proxies":["127.0.0.1/32"],"web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
+TEST_4="$(GTS_DB_ADDRESS=some.db.address go run ./cmd/gotosocial/... --db-address some.other.db.address debug config)"
+if [ "${TEST_4}" != "${TEST_4_EXPECTED}" ]; then
+ echo "TEST_4 not equal TEST_4_EXPECTED"
+ exit 1
+else
+ echo "TEST_4 OK"
+fi
+
+echo "TEST_5 Test loading a config file by passing an env var."
+TEST_5_EXPECTED='{"account-domain":"example.org","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"./test/test.yaml","db-address":"127.0.0.1","db-database":"postgres","db-password":"postgres","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"postgres","help":false,"host":"gts.example.org","letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","email","profile","groups"],"oidc-skip-verification":false,"port":8080,"protocol":"https","smtp-from":"someone@example.org","smtp-host":"verycoolemailhost.mail","smtp-password":"smtp-password","smtp-port":8888,"smtp-username":"smtp-username","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-base-path":"/gotosocial/storage","storage-serve-base-path":"/fileserver","storage-serve-host":"localhost","storage-serve-protocol":"https","trusted-proxies":["127.0.0.1/32","0.0.0.0/0"],"web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
+TEST_5="$(GTS_CONFIG_PATH=./test/test.yaml go run ./cmd/gotosocial/... debug config)"
+if [ "${TEST_5}" != "${TEST_5_EXPECTED}" ]; then
+ echo "TEST_5 not equal TEST_5_EXPECTED"
+ exit 1
+else
+ echo "TEST_5 OK"
+fi
+
+echo "TEST_6 Test loading a config file by passing cli flag."
+TEST_6_EXPECTED='{"account-domain":"example.org","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"./test/test.yaml","db-address":"127.0.0.1","db-database":"postgres","db-password":"postgres","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"postgres","help":false,"host":"gts.example.org","letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","email","profile","groups"],"oidc-skip-verification":false,"port":8080,"protocol":"https","smtp-from":"someone@example.org","smtp-host":"verycoolemailhost.mail","smtp-password":"smtp-password","smtp-port":8888,"smtp-username":"smtp-username","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-base-path":"/gotosocial/storage","storage-serve-base-path":"/fileserver","storage-serve-host":"localhost","storage-serve-protocol":"https","trusted-proxies":["127.0.0.1/32","0.0.0.0/0"],"web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
+TEST_6="$(go run ./cmd/gotosocial/... --config-path ./test/test.yaml debug config)"
+if [ "${TEST_6}" != "${TEST_6_EXPECTED}" ]; then
+ echo "TEST_6 not equal TEST_6_EXPECTED"
+ exit 1
+else
+ echo "TEST_6 OK"
+fi
+
+echo "TEST_7 Test loading a config file and overriding one of the variables with a cli flag."
+TEST_7_EXPECTED='{"account-domain":"","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"./test/test.yaml","db-address":"127.0.0.1","db-database":"postgres","db-password":"postgres","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"postgres","help":false,"host":"gts.example.org","letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","email","profile","groups"],"oidc-skip-verification":false,"port":8080,"protocol":"https","smtp-from":"someone@example.org","smtp-host":"verycoolemailhost.mail","smtp-password":"smtp-password","smtp-port":8888,"smtp-username":"smtp-username","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-base-path":"/gotosocial/storage","storage-serve-base-path":"/fileserver","storage-serve-host":"localhost","storage-serve-protocol":"https","trusted-proxies":["127.0.0.1/32","0.0.0.0/0"],"web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
+TEST_7="$(go run ./cmd/gotosocial/... --config-path ./test/test.yaml --account-domain '' debug config)"
+if [ "${TEST_7}" != "${TEST_7_EXPECTED}" ]; then
+ echo "TEST_7 not equal TEST_7_EXPECTED"
+ exit 1
+else
+ echo "TEST_7 OK"
+fi
+
+echo "TEST_8 Test loading a config file and overriding one of the variables with an env var."
+TEST_8_EXPECTED='{"account-domain":"peepee","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"./test/test.yaml","db-address":"127.0.0.1","db-database":"postgres","db-password":"postgres","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"postgres","help":false,"host":"gts.example.org","letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","email","profile","groups"],"oidc-skip-verification":false,"port":8080,"protocol":"https","smtp-from":"someone@example.org","smtp-host":"verycoolemailhost.mail","smtp-password":"smtp-password","smtp-port":8888,"smtp-username":"smtp-username","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-base-path":"/gotosocial/storage","storage-serve-base-path":"/fileserver","storage-serve-host":"localhost","storage-serve-protocol":"https","trusted-proxies":["127.0.0.1/32","0.0.0.0/0"],"web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
+TEST_8="$(GTS_ACCOUNT_DOMAIN='peepee' go run ./cmd/gotosocial/... --config-path ./test/test.yaml debug config)"
+if [ "${TEST_8}" != "${TEST_8_EXPECTED}" ]; then
+ echo "TEST_8 not equal TEST_8_EXPECTED"
+ exit 1
+else
+ echo "TEST_8 OK"
+fi
+
+echo "TEST_9 Test loading a config file and overriding one of the variables with both an env var and a cli flag. The cli flag should have priority."
+TEST_9_EXPECTED='{"account-domain":"","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"./test/test.yaml","db-address":"127.0.0.1","db-database":"postgres","db-password":"postgres","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"postgres","help":false,"host":"gts.example.org","letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","email","profile","groups"],"oidc-skip-verification":false,"port":8080,"protocol":"https","smtp-from":"someone@example.org","smtp-host":"verycoolemailhost.mail","smtp-password":"smtp-password","smtp-port":8888,"smtp-username":"smtp-username","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-base-path":"/gotosocial/storage","storage-serve-base-path":"/fileserver","storage-serve-host":"localhost","storage-serve-protocol":"https","trusted-proxies":["127.0.0.1/32","0.0.0.0/0"],"web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
+TEST_9="$(GTS_ACCOUNT_DOMAIN='peepee' go run ./cmd/gotosocial/... --config-path ./test/test.yaml --account-domain '' debug config)"
+if [ "${TEST_9}" != "${TEST_9_EXPECTED}" ]; then
+ echo "TEST_9 not equal TEST_9_EXPECTED"
+ exit 1
+else
+ echo "TEST_9 OK"
+fi
+
+echo "TEST_10 Test loading a config file from json."
+TEST_10_EXPECTED='{"account-domain":"example.org","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"./test/test.json","db-address":"127.0.0.1","db-database":"postgres","db-password":"postgres","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"postgres","help":false,"host":"gts.example.org","letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","email","profile","groups"],"oidc-skip-verification":false,"port":8080,"protocol":"https","smtp-from":"someone@example.org","smtp-host":"verycoolemailhost.mail","smtp-password":"smtp-password","smtp-port":8888,"smtp-username":"smtp-username","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-base-path":"/gotosocial/storage","storage-serve-base-path":"/fileserver","storage-serve-host":"localhost","storage-serve-protocol":"https","trusted-proxies":["127.0.0.1/32","0.0.0.0/0"],"web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
+TEST_10="$(go run ./cmd/gotosocial/... --config-path ./test/test.json debug config)"
+if [ "${TEST_10}" != "${TEST_10_EXPECTED}" ]; then
+ echo "TEST_10 not equal TEST_10_EXPECTED"
+ exit 1
+else
+ echo "TEST_10 OK"
+fi
+
+echo "TEST_11 Test loading a partial config file. Default values should be used apart from those set in the config file."
+TEST_11_EXPECTED='{"account-domain":"peepee.poopoo","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"./test/test2.yaml","db-address":"localhost","db-database":"postgres","db-password":"postgres","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"postgres","help":false,"host":"","letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-level":"trace","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","profile","email","groups"],"oidc-skip-verification":false,"port":8080,"protocol":"https","smtp-from":"GoToSocial","smtp-host":"","smtp-password":"","smtp-port":0,"smtp-username":"","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-base-path":"/gotosocial/storage","storage-serve-base-path":"/fileserver","storage-serve-host":"localhost","storage-serve-protocol":"https","trusted-proxies":["127.0.0.1/32"],"web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
+TEST_11="$(go run ./cmd/gotosocial/... --config-path ./test/test2.yaml debug config)"
+if [ "${TEST_11}" != "${TEST_11_EXPECTED}" ]; then
+ echo "TEST_11 not equal TEST_11_EXPECTED"
+ exit 1
+else
+ echo "TEST_11 OK"
+fi
+
+echo "FINISHED CLI TESTS"
diff --git a/test/test.json b/test/test.json
new file mode 100644
index 000000000..3d0387ea2
--- /dev/null
+++ b/test/test.json
@@ -0,0 +1,64 @@
+{
+ "account-domain": "example.org",
+ "accounts-approval-required": true,
+ "accounts-reason-required": true,
+ "accounts-registration-open": true,
+ "application-name": "gotosocial",
+ "bind-address": "0.0.0.0",
+ "config-path": "./test/test.yaml",
+ "db-address": "127.0.0.1",
+ "db-database": "postgres",
+ "db-password": "postgres",
+ "db-port": 5432,
+ "db-tls-ca-cert": "",
+ "db-tls-mode": "disable",
+ "db-type": "postgres",
+ "db-user": "postgres",
+ "help": false,
+ "host": "gts.example.org",
+ "letsencrypt-cert-dir": "/gotosocial/storage/certs",
+ "letsencrypt-email-address": "",
+ "letsencrypt-enabled": true,
+ "letsencrypt-port": 80,
+ "log-level": "info",
+ "media-description-max-chars": 500,
+ "media-description-min-chars": 0,
+ "media-image-max-size": 2097152,
+ "media-video-max-size": 10485760,
+ "oidc-client-id": "",
+ "oidc-client-secret": "",
+ "oidc-enabled": false,
+ "oidc-idp-name": "",
+ "oidc-issuer": "",
+ "oidc-scopes": [
+ "openid",
+ "email",
+ "profile",
+ "groups"
+ ],
+ "oidc-skip-verification": false,
+ "port": 8080,
+ "protocol": "https",
+ "smtp-from": "someone@example.org",
+ "smtp-host": "verycoolemailhost.mail",
+ "smtp-password": "smtp-password",
+ "smtp-port": 8888,
+ "smtp-username": "smtp-username",
+ "software-version": "",
+ "statuses-cw-max-chars": 100,
+ "statuses-max-chars": 5000,
+ "statuses-media-max-files": 6,
+ "statuses-poll-max-options": 6,
+ "statuses-poll-option-max-chars": 50,
+ "storage-backend": "local",
+ "storage-base-path": "/gotosocial/storage",
+ "storage-serve-base-path": "/fileserver",
+ "storage-serve-host": "localhost",
+ "storage-serve-protocol": "https",
+ "trusted-proxies": [
+ "127.0.0.1/32",
+ "0.0.0.0/0"
+ ],
+ "web-asset-base-dir": "./web/assets/",
+ "web-template-base-dir": "./web/template/"
+}
diff --git a/test/test.yaml b/test/test.yaml
new file mode 100644
index 000000000..467ab920f
--- /dev/null
+++ b/test/test.yaml
@@ -0,0 +1,399 @@
+# GoToSocial
+# Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+###########################
+##### GENERAL CONFIG ######
+###########################
+
+# String. Log level to use throughout the application. Must be lower-case.
+# Options: ["trace","debug","info","warn","error","fatal"]
+# Default: "info"
+log-level: "info"
+
+# String. Application name to use internally.
+# Examples: ["My Application","gotosocial"]
+# Default: "gotosocial"
+application-name: "gotosocial"
+
+# String. Hostname that this server will be reachable at. Defaults to localhost for local testing,
+# but you should *definitely* change this when running for real, or your server won't work at all.
+# DO NOT change this after your server has already run once, or you will break things!
+# Examples: ["gts.example.org","some.server.com"]
+# Default: "localhost"
+host: "gts.example.org"
+
+# String. Domain to use when federating profiles. This is useful when you want your server to be at
+# eg., "gts.example.org", but you want the domain on accounts to be "example.org" because it looks better
+# or is just shorter/easier to remember.
+# To make this setting work properly, you need to redirect requests at "example.org/.well-known/webfinger"
+# to "gts.example.org/.well-known/webfinger" so that GtS can handle them properly.
+# You should also redirect requests at "example.org/.well-known/nodeinfo" in the same way.
+# An empty string (ie., not set) means that the same value as 'host' will be used.
+# DO NOT change this after your server has already run once, or you will break things!
+# Examples: ["example.org","server.com"]
+# Default: ""
+account-domain: "example.org"
+
+# String. Protocol to use for the server. Only change to http for local testing!
+# This should be the protocol part of the URI that your server is actually reachable on. So even if you're
+# running GoToSocial behind a reverse proxy that handles SSL certificates for you, instead of using built-in
+# letsencrypt, it should still be https.
+# Options: ["http","https"]
+# Default: "https"
+protocol: "https"
+
+# String. Address to bind the GoToSocial server to.
+# This can be an IPv4 address or an IPv6 address (surrounded in square brackets), or a hostname.
+# Default value will bind to all interfaces.
+# You probably won't need to change this unless you're setting GoToSocial up in some fancy way or
+# you have specific networking requirements.
+# Examples: ["0.0.0.0", "172.128.0.16", "localhost", "[::]", "[2001:db8::fed1]"]
+# Default: "0.0.0.0"
+bind-address: "0.0.0.0"
+
+# Int. Listen port for the GoToSocial webserver + API. If you're running behind a reverse proxy and/or in a docker
+# container, just set this to whatever you like (or leave the default), and make sure it's forwarded properly.
+# If you are running with built-in letsencrypt enabled, and running GoToSocial directly on a host machine, you will
+# probably want to set this to 443 (standard https port), unless you have other services already using that port.
+# This *MUST NOT* be the same as the letsencrypt port specified below, unless letsencrypt is turned off.
+# Examples: [443, 6666, 8080]
+# Default: 8080
+port: 8080
+
+# Array of string. CIDRs or IP addresses of proxies that should be trusted when determining real client IP from behind a reverse proxy.
+# If you're running inside a Docker container behind Traefik or Nginx, for example, add the subnet of your docker network,
+# or the gateway of the docker network, and/or the address of the reverse proxy (if it's not running on the host network).
+# Example: ["127.0.0.1/32", "172.20.0.1"]
+# Default: ["127.0.0.1/32"] (localhost)
+trusted-proxies:
+ - "127.0.0.1/32"
+ - "0.0.0.0/0"
+
+############################
+##### DATABASE CONFIG ######
+############################
+
+# Config pertaining to the GoToSocial database connection
+
+# String. Database type.
+# Options: ["postgres","sqlite"]
+# Default: "postgres"
+db-type: "postgres"
+
+# String. Database address or parameters.
+# Examples: ["localhost","my.db.host","127.0.0.1","192.111.39.110",":memory:"]
+# Default: "localhost"
+db-address: "127.0.0.1"
+
+# Int. Port for database connection.
+# Examples: [5432, 1234, 6969]
+# Default: 5432
+db-port: 5432
+
+# String. Username for the database connection.
+# Examples: ["mydbuser","postgres","gotosocial"]
+# Default: "postgres"
+db-user: "postgres"
+
+# REQUIRED
+# String. Password to use for the database connection
+# Examples: ["password123","verysafepassword","postgres"]
+# Default: "postgres"
+db-password: "postgres"
+
+# String. Name of the database to use within the provided database type.
+# Examples: ["mydb","postgres","gotosocial"]
+# Default: "postgres"
+db-database: "postgres"
+
+# String. Disable, enable, or require SSL/TLS connection to the database.
+# If "disable" then no TLS connection will be attempted.
+# If "enable" then TLS will be tried, but the database certificate won't be checked (for self-signed certs).
+# If "require" then TLS will be required to make a connection, and a valid certificate must be presented.
+# Options: ["disable", "enable", "require"]
+# Default: "disable"
+db-tls-mode: "disable"
+
+# String. Path to a CA certificate on the host machine for db certificate validation.
+# If this is left empty, just the host certificates will be used.
+# If filled in, the certificate will be loaded and added to host certificates.
+# Examples: ["/path/to/some/cert.crt"]
+# Default: ""
+db-tls-ca-cert: ""
+
+######################
+##### WEB CONFIG #####
+######################
+
+# Config pertaining to templating and serving of web pages/email notifications and the like
+
+# String. Directory from which gotosocial will attempt to load html templates (.tmpl files).
+# Examples: ["/some/absolute/path/", "./relative/path/", "../../some/weird/path/"]
+# Default: "./web/template/"
+web-template-base-dir: "./web/template/"
+
+# String. Directory from which gotosocial will attempt to serve static web assets (images, scripts).
+# Examples: ["/some/absolute/path/", "./relative/path/", "../../some/weird/path/"]
+# Default: "./web/assets/"
+web-asset-base-dir: "./web/assets/"
+
+###########################
+##### ACCOUNTS CONFIG #####
+###########################
+
+# Config pertaining to creation and maintenance of accounts on the server, as well as defaults for new accounts.
+
+# Bool. Do we want people to be able to just submit sign up requests, or do we want invite only?
+# Options: [true, false]
+# Default: true
+accounts-registration-open: true
+
+# Bool. Do sign up requests require approval from an admin/moderator before an account can sign in/use the server?
+# Options: [true, false]
+# Default: true
+accounts-approval-required: true
+
+# Bool. Are sign up requests required to submit a reason for the request (eg., an explanation of why they want to join the instance)?
+# Options: [true, false]
+# Default: true
+accounts-reason-required: true
+
+########################
+##### MEDIA CONFIG #####
+########################
+
+# Config pertaining to user media uploads (videos, images, image descriptions).
+
+# Int. Maximum allowed image upload size in bytes.
+# Examples: [2097152, 10485760]
+# Default: 2097152 -- aka 2MB
+media-image-max-size: 2097152
+
+# Int. Maximum allowed video upload size in bytes.
+# Examples: [2097152, 10485760]
+# Default: 10485760 -- aka 10MB
+media-video-max-size: 10485760
+
+# Int. Minimum amount of characters required as an image or video description.
+# Examples: [500, 1000, 1500]
+# Default: 0 (not required)
+media-description-min-chars: 0
+
+# Int. Maximum amount of characters permitted in an image or video description.
+# Examples: [500, 1000, 1500]
+# Default: 500
+media-description-max-chars: 500
+
+##########################
+##### STORAGE CONFIG #####
+##########################
+
+# Config pertaining to storage of user-created uploads (videos, images, etc).
+
+# String. Type of storage backend to use.
+# Examples: ["local", "s3"]
+# Default: "local" (storage on local disk)
+# NOTE: s3 storage is not yet supported!
+storage-backend: "local"
+
+# String. Directory to use as a base path for storing files.
+# Make sure whatever user/group gotosocial is running as has permission to access
+# this directory, and to create new subdirectories and files within it.
+# Examples: ["/home/gotosocial/storage", "/opt/gotosocial/datastorage"]
+# Default: "/gotosocial/storage"
+storage-base-path: "/gotosocial/storage"
+
+# String. Protocol to use for serving stored files.
+# It's very unlikely that you'll need to change this ever, but there might be edge cases.
+# Examples: ["http", "https"]
+storage-serve-protocol: "https"
+
+# String. Host for serving stored files.
+# If you're using local storage, this should be THE SAME as the value you've set for Host, above.
+# It should only be a different value if you're serving stored files from a host
+# other than the one your instance is running on.
+# Examples: ["localhost", "example.org"]
+# Default: "localhost" -- you should absolutely change this.
+storage-serve-host: "localhost"
+
+# String. Base path for serving stored files. This will be added to storage-serve-host and storage-serve-protocol
+# to form the prefix url of your stored files. Eg., https://example.org/fileserver/.....
+# It's unlikely that you will need to change this.
+# Examples: ["/fileserver", "/media"]
+# Default: "/fileserver"
+storage-serve-base-path: "/fileserver"
+
+###########################
+##### STATUSES CONFIG #####
+###########################
+
+# Config pertaining to the creation of statuses/posts, and permitted limits.
+
+# Int. Maximum amount of characters permitted for a new status.
+# Note that going way higher than the default might break federation.
+# Examples: [140, 500, 5000]
+# Default: 5000
+statuses-max-chars: 5000
+
+# Int. Maximum amount of characters allowed in the CW/subject header of a status.
+# Note that going way higher than the default might break federation.
+# Examples: [100, 200]
+# Default: 100
+statuses-cw-max-chars: 100
+
+# Int. Maximum amount of options to permit when creating a new poll.
+# Note that going way higher than the default might break federation.
+# Examples: [4, 6, 10]
+# Default: 6
+statuses-poll-max-options: 6
+
+# Int. Maximum amount of characters to permit per poll option when creating a new poll.
+# Note that going way higher than the default might break federation.
+# Examples: [50, 100, 150]
+# Default: 50
+statuses-poll-option-max-chars: 50
+
+# Int. Maximum amount of media files that can be attached to a new status.
+# Note that going way higher than the default might break federation.
+# Examples: [4, 6, 10]
+# Default: 6
+statuses-media-max-files: 6
+
+##############################
+##### LETSENCRYPT CONFIG #####
+##############################
+
+# Config pertaining to the automatic acquisition and use of LetsEncrypt HTTPS certificates.
+
+# Bool. Whether or not letsencrypt should be enabled for the server.
+# If false, the rest of the settings here will be ignored.
+# You should only change this if you want to serve GoToSocial behind a reverse proxy
+# like Traefik, HAProxy, or Nginx.
+# Options: [true, false]
+# Default: true
+letsencrypt-enabled: true
+
+# Int. Port to listen for letsencrypt certificate challenges on.
+# If letsencrypt is enabled, this port must be reachable or you won't be able to obtain certs.
+# If letsencrypt is disabled, this port will not be used.
+# This *must not* be the same as the webserver/API port specified above.
+# Examples: [80, 8000, 1312]
+# Default: 80
+letsencrypt-port: 80
+
+# String. Directory in which to store LetsEncrypt certificates.
+# It is a good move to make this a sub-path within your storage directory, as it makes
+# backup easier, but you might wish to move them elsewhere if they're also accessed by other services.
+# In any case, make sure GoToSocial has permissions to write to / read from this directory.
+# Examples: ["/home/gotosocial/storage/certs", "/acmecerts"]
+# Default: "/gotosocial/storage/certs"
+letsencrypt-cert-dir: "/gotosocial/storage/certs"
+
+# String. Email address to use when registering LetsEncrypt certs.
+# Most likely, this will be the email address of the instance administrator.
+# LetsEncrypt will send notifications about expiring certificates etc to this address.
+# Examples: ["admin@example.org"]
+# Default: ""
+letsencrypt-email-address: ""
+
+#######################
+##### OIDC CONFIG #####
+#######################
+
+# Config for authentication with an external OIDC provider (Dex, Google, Auth0, etc).
+
+# Bool. Enable authentication with external OIDC provider. If set to true, then
+# the other OIDC options must be set as well. If this is set to false, then the standard
+# internal oauth flow will be used, where users sign in to GtS with username/password.
+# Options: [true, false]
+# Default: false
+oidc-enabled: false
+
+# String. Name of the oidc idp (identity provider). This will be shown to users when
+# they log in.
+# Examples: ["Google", "Dex", "Auth0"]
+# Default: ""
+oidc-idp-name: ""
+
+# Bool. Skip the normal verification flow of tokens returned from the OIDC provider, ie.,
+# don't check the expiry or signature. This should only be used in debugging or testing,
+# never ever in a production environment as it's extremely unsafe!
+# Options: [true, false]
+# Default: false
+oidc-skip-verification: false
+
+# String. The OIDC issuer URI. This is where GtS will redirect users to for login.
+# Typically this will look like a standard web URL.
+# Examples: ["https://auth.example.org", "https://example.org/auth"]
+# Default: ""
+oidc-issuer: ""
+
+# String. The ID for this client as registered with the OIDC provider.
+# Examples: ["some-client-id", "fda3772a-ad35-41c9-9a59-f1943ad18f54"]
+# Default: ""
+oidc-client-id: ""
+
+# String. The secret for this client as registered with the OIDC provider.
+# Examples: ["super-secret-business", "79379cf5-8057-426d-bb83-af504d98a7b0"]
+# Default: ""
+oidc-client-secret: ""
+
+# Array of string. Scopes to request from the OIDC provider. The returned values will be used to
+# populate users created in GtS as a result of the authentication flow. 'openid' and 'email' are required.
+# 'profile' is used to extract a username for the newly created user.
+# 'groups' is optional and can be used to determine if a user is an admin (if they're in the group 'admin' or 'admins').
+# Examples: See eg., https://auth0.com/docs/scopes/openid-connect-scopes
+# Default: ["openid", "email", "profile", "groups"]
+oidc-scopes:
+ - "openid"
+ - "email"
+ - "profile"
+ - "groups"
+
+#######################
+##### SMTP CONFIG #####
+#######################
+
+# Config for sending emails via an smtp server. See https://en.wikipedia.org/wiki/Simple_Mail_Transfer_Protocol
+
+# String. The hostname of the smtp server you want to use.
+# If this is not set, smtp will not be used to send emails, and you can ignore the other settings.
+# Examples: ["mail.example.org", "localhost"]
+# Default: ""
+smtp-host: "verycoolemailhost.mail"
+
+# Int. Port to use to connect to the smtp server.
+# Examples: []
+# Default: 0
+smtp-port: 8888
+
+# String. Username to use when authenticating with the smtp server.
+# This should have been provided to you by your smtp host.
+# This is often, but not always, an email address.
+# Examples: ["maillord@example.org"]
+# Default: ""
+smtp-username: "smtp-username"
+
+# String. Password to use when authenticating with the smtp server.
+# This should have been provided to you by your smtp host.
+# Examples: ["1234", "password"]
+# Default: ""
+smtp-password: "smtp-password"
+
+# String. 'From' address for sent emails.
+# Examples: ["mail@example.org"]
+# Default: ""
+smtp-from: "someone@example.org"
diff --git a/test/test2.yaml b/test/test2.yaml
new file mode 100644
index 000000000..f26ac7ffb
--- /dev/null
+++ b/test/test2.yaml
@@ -0,0 +1,2 @@
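+# Minimal partial config used by TEST_11 in test/cliparsing.sh: only these two keys
+# are set, so every other value is expected to fall back to its built-in default.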
+log-level: "trace"
+account-domain: "peepee.poopoo"
diff --git a/testrig/config.go b/testrig/config.go
index f7028b1b5..be5efab61 100644
--- a/testrig/config.go
+++ b/testrig/config.go
@@ -18,9 +18,103 @@
package testrig
-import "github.com/superseriousbusiness/gotosocial/internal/config"
+import (
+ "reflect"
-// NewTestConfig returns a config initialized with test defaults
-func NewTestConfig() *config.Config {
- return config.TestDefault()
+ "github.com/coreos/go-oidc/v3/oidc"
+ "github.com/spf13/viper"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
+)
+
+// InitTestConfig resets + initializes the viper configuration with test defaults.
+func InitTestConfig() {
+ // reset viper to an empty state
+ viper.Reset()
+
+ // get the field names of config.Keys
+ keyFields := reflect.VisibleFields(reflect.TypeOf(config.Keys))
+
+ // get the field values of config.Keys
+ keyValues := reflect.ValueOf(config.Keys)
+
+ // get the field values of TestDefaults
+ testDefaults := reflect.ValueOf(TestDefaults)
+
+ // for each config field...
+ for _, field := range keyFields {
+ // the viper config key should be the value of the key
+ key, ok := keyValues.FieldByName(field.Name).Interface().(string)
+ if !ok {
+ panic("could not convert config.Keys value to string")
+ }
+
+ // the value should be the test default corresponding to the given fieldName
+ value := testDefaults.FieldByName(field.Name).Interface()
+
+ // actually set the value in viper -- this will override everything
+ viper.Set(key, value)
+ }
+}
+
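+// A typical test suite calls InitTestConfig during setup, before constructing any
+// other test dependencies, since those now read their settings from viper rather
+// than taking a *config.Config. A sketch, mirroring internal/visibility/filter_test.go:
+//
+//	func (suite *FilterStandardTestSuite) SetupTest() {
+//		testrig.InitTestLog()
+//		testrig.InitTestConfig()
+//		suite.db = testrig.NewTestDB()
+//		suite.filter = visibility.NewFilter(suite.db)
+//	}
+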
+// TestDefaults is a config.Values struct with values set that are suitable for local testing.
+var TestDefaults = config.Values{
+ LogLevel: "trace",
+ ApplicationName: "gotosocial",
+ ConfigPath: "",
+ Host: "localhost:8080",
+ AccountDomain: "localhost:8080",
+ Protocol: "http",
+ BindAddress: "127.0.0.1",
+ Port: 8080,
+ TrustedProxies: []string{"127.0.0.1/32"},
+
+ DbType: "sqlite",
+ DbAddress: ":memory:",
+ DbPort: 5432,
+ DbUser: "postgres",
+ DbPassword: "postgres",
+ DbDatabase: "postgres",
+
+ WebTemplateBaseDir: "./web/template/",
+ WebAssetBaseDir: "./web/assets/",
+
+ AccountsRegistrationOpen: true,
+ AccountsApprovalRequired: true,
+ AccountsReasonRequired: true,
+
+ MediaImageMaxSize: 1048576, // 1mb
+ MediaVideoMaxSize: 5242880, // 5mb
+ MediaDescriptionMinChars: 0,
+ MediaDescriptionMaxChars: 500,
+
+ StorageBackend: "local",
+ StorageBasePath: "/gotosocial/storage",
+ StorageServeProtocol: "http",
+ StorageServeHost: "localhost:8080",
+ StorageServeBasePath: "/fileserver",
+
+ StatusesMaxChars: 5000,
+ StatusesCWMaxChars: 100,
+ StatusesPollMaxOptions: 6,
+ StatusesPollOptionMaxChars: 50,
+ StatusesMediaMaxFiles: 6,
+
+ LetsEncryptEnabled: false,
+ LetsEncryptPort: 0,
+ LetsEncryptCertDir: "",
+ LetsEncryptEmailAddress: "",
+
+ OIDCEnabled: false,
+ OIDCIdpName: "",
+ OIDCSkipVerification: false,
+ OIDCIssuer: "",
+ OIDCClientID: "",
+ OIDCClientSecret: "",
+ OIDCScopes: []string{oidc.ScopeOpenID, "profile", "email", "groups"},
+
+ SMTPHost: "",
+ SMTPPort: 0,
+ SMTPUsername: "",
+ SMTPPassword: "",
+ SMTPFrom: "GoToSocial",
}
diff --git a/testrig/db.go b/testrig/db.go
index 488d30734..d2146ab34 100644
--- a/testrig/db.go
+++ b/testrig/db.go
@@ -24,6 +24,8 @@ import (
"strconv"
"github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/db/bundb"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
@@ -66,27 +68,23 @@ var testModels = []interface{}{
// If the environment variable GTS_DB_PORT is set, it will take that
// value as the port instead.
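+// For example, to run tests against a local postgres instead of the default
+// in-memory sqlite (a sketch -- adjust the values for your own setup):
+//
+//	GTS_DB_TYPE=postgres GTS_DB_ADDRESS=127.0.0.1 GTS_DB_PORT=5432 go test ./...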
func NewTestDB() db.DB {
- config := NewTestConfig()
- alternateAddress := os.Getenv("GTS_DB_ADDRESS")
- if alternateAddress != "" {
- config.DBConfig.Address = alternateAddress
+ if alternateAddress := os.Getenv("GTS_DB_ADDRESS"); alternateAddress != "" {
+ viper.Set(config.Keys.DbAddress, alternateAddress)
}
- alternateDBType := os.Getenv("GTS_DB_TYPE")
- if alternateDBType != "" {
- config.DBConfig.Type = alternateDBType
+ if alternateDBType := os.Getenv("GTS_DB_TYPE"); alternateDBType != "" {
+ viper.Set(config.Keys.DbType, alternateDBType)
}
- alternateDBPort := os.Getenv("GTS_DB_PORT")
- if alternateDBPort != "" {
+ if alternateDBPort := os.Getenv("GTS_DB_PORT"); alternateDBPort != "" {
port, err := strconv.ParseInt(alternateDBPort, 10, 64)
if err != nil {
panic(err)
}
- config.DBConfig.Port = int(port)
+ viper.Set(config.Keys.DbPort, port)
}
- testDB, err := bundb.NewBunDBService(context.Background(), config)
+ testDB, err := bundb.NewBunDBService(context.Background())
if err != nil {
logrus.Panic(err)
}
diff --git a/testrig/email.go b/testrig/email.go
index 60ee96ac2..714025d85 100644
--- a/testrig/email.go
+++ b/testrig/email.go
@@ -18,7 +18,11 @@
package testrig
-import "github.com/superseriousbusiness/gotosocial/internal/email"
+import (
+ "github.com/spf13/viper"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
+ "github.com/superseriousbusiness/gotosocial/internal/email"
+)
// NewEmailSender returns a noop email sender that won't make any remote calls.
//
@@ -26,6 +30,8 @@ import "github.com/superseriousbusiness/gotosocial/internal/email"
// the map, with email address of the recipient as the key, and the value as the
// parsed email message as it would have been sent.
func NewEmailSender(templateBaseDir string, sentEmails map[string]string) email.Sender {
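+	// make templateBaseDir available via viper, since email.NewNoopSender now reads it from config instead of taking it as a parameter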
+ viper.Set(config.Keys.WebTemplateBaseDir, templateBaseDir)
+
var sendCallback func(toAddress string, message string)
if sentEmails != nil {
@@ -34,7 +40,7 @@ func NewEmailSender(templateBaseDir string, sentEmails map[string]string) email.
}
}
- s, err := email.NewNoopSender(templateBaseDir, sendCallback)
+ s, err := email.NewNoopSender(sendCallback)
if err != nil {
panic(err)
}
diff --git a/testrig/federatingdb.go b/testrig/federatingdb.go
index b0ef511bc..f01a4e9fe 100644
--- a/testrig/federatingdb.go
+++ b/testrig/federatingdb.go
@@ -7,5 +7,5 @@ import (
// NewTestFederatingDB returns a federating DB with the underlying db
func NewTestFederatingDB(db db.DB) federatingdb.DB {
- return federatingdb.New(db, NewTestConfig())
+ return federatingdb.New(db)
}
diff --git a/testrig/federator.go b/testrig/federator.go
index 42474566b..e1050ad9f 100644
--- a/testrig/federator.go
+++ b/testrig/federator.go
@@ -27,5 +27,5 @@ import (
// NewTestFederator returns a federator with the given database and (mock!!) transport controller.
func NewTestFederator(db db.DB, tc transport.Controller, storage *kv.KVStore) federation.Federator {
- return federation.NewFederator(db, NewTestFederatingDB(db), tc, NewTestConfig(), NewTestTypeConverter(db), NewTestMediaHandler(db, storage))
+ return federation.NewFederator(db, NewTestFederatingDB(db), tc, NewTestTypeConverter(db), NewTestMediaHandler(db, storage))
}
diff --git a/testrig/mediahandler.go b/testrig/mediahandler.go
index a6b5cfec7..abc714551 100644
--- a/testrig/mediahandler.go
+++ b/testrig/mediahandler.go
@@ -26,5 +26,5 @@ import (
// NewTestMediaHandler returns a media handler with the default test config, and the given db and storage.
func NewTestMediaHandler(db db.DB, storage *kv.KVStore) media.Handler {
- return media.New(NewTestConfig(), db, storage)
+ return media.New(db, storage)
}
diff --git a/testrig/processor.go b/testrig/processor.go
index 900d60b18..c09748e3c 100644
--- a/testrig/processor.go
+++ b/testrig/processor.go
@@ -28,5 +28,5 @@ import (
// NewTestProcessor returns a Processor suitable for testing purposes
func NewTestProcessor(db db.DB, storage *kv.KVStore, federator federation.Federator, emailSender email.Sender) processing.Processor {
- return processing.NewProcessor(NewTestConfig(), NewTestTypeConverter(db), federator, NewTestOauthServer(db), NewTestMediaHandler(db, storage), storage, NewTestTimelineManager(db), db, emailSender)
+ return processing.NewProcessor(NewTestTypeConverter(db), federator, NewTestOauthServer(db), NewTestMediaHandler(db, storage), storage, NewTestTimelineManager(db), db, emailSender)
}
diff --git a/testrig/router.go b/testrig/router.go
index 8efe1e4de..4bfa57603 100644
--- a/testrig/router.go
+++ b/testrig/router.go
@@ -27,7 +27,7 @@ import (
// NewTestRouter returns a Router suitable for testing
func NewTestRouter(db db.DB) router.Router {
- r, err := router.New(context.Background(), NewTestConfig(), db)
+ r, err := router.New(context.Background(), db)
if err != nil {
panic(err)
}
diff --git a/testrig/timelinemanager.go b/testrig/timelinemanager.go
index dd39f6548..4b316a52e 100644
--- a/testrig/timelinemanager.go
+++ b/testrig/timelinemanager.go
@@ -7,5 +7,5 @@ import (
// NewTestTimelineManager returns a new timeline.Manager, suitable for testing, using the given db.
func NewTestTimelineManager(db db.DB) timeline.Manager {
- return timeline.NewManager(db, NewTestTypeConverter(db), NewTestConfig())
+ return timeline.NewManager(db, NewTestTypeConverter(db))
}
diff --git a/testrig/transportcontroller.go b/testrig/transportcontroller.go
index c82b55f69..d383e096c 100644
--- a/testrig/transportcontroller.go
+++ b/testrig/transportcontroller.go
@@ -39,7 +39,7 @@ import (
// PER TEST rather than per suite, so that the do function can be set on a test by test (or even more granular)
// basis.
func NewTestTransportController(client pub.HttpClient, db db.DB) transport.Controller {
- return transport.NewController(NewTestConfig(), db, &federation.Clock{}, client)
+ return transport.NewController(db, &federation.Clock{}, client)
}
// NewMockHTTPClient returns a client that conforms to the pub.HttpClient interface,
diff --git a/testrig/typeconverter.go b/testrig/typeconverter.go
index 9d49e6c99..bf064c245 100644
--- a/testrig/typeconverter.go
+++ b/testrig/typeconverter.go
@@ -25,5 +25,5 @@ import (
// NewTestTypeConverter returns a type converter with the given db and the default test config
func NewTestTypeConverter(db db.DB) typeutils.TypeConverter {
- return typeutils.NewConverter(NewTestConfig(), db)
+ return typeutils.NewConverter(db)
}
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
deleted file mode 100644
index b48005673..000000000
--- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package md2man
-
-import (
- "github.com/russross/blackfriday/v2"
-)
-
-// Render converts a markdown document into a roff formatted document.
-func Render(doc []byte) []byte {
- renderer := NewRoffRenderer()
-
- return blackfriday.Run(doc,
- []blackfriday.Option{blackfriday.WithRenderer(renderer),
- blackfriday.WithExtensions(renderer.GetExtensions())}...)
-}
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
deleted file mode 100644
index be2b34360..000000000
--- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
+++ /dev/null
@@ -1,336 +0,0 @@
-package md2man
-
-import (
- "fmt"
- "io"
- "os"
- "strings"
-
- "github.com/russross/blackfriday/v2"
-)
-
-// roffRenderer implements the blackfriday.Renderer interface for creating
-// roff format (manpages) from markdown text
-type roffRenderer struct {
- extensions blackfriday.Extensions
- listCounters []int
- firstHeader bool
- firstDD bool
- listDepth int
-}
-
-const (
- titleHeader = ".TH "
- topLevelHeader = "\n\n.SH "
- secondLevelHdr = "\n.SH "
- otherHeader = "\n.SS "
- crTag = "\n"
- emphTag = "\\fI"
- emphCloseTag = "\\fP"
- strongTag = "\\fB"
- strongCloseTag = "\\fP"
- breakTag = "\n.br\n"
- paraTag = "\n.PP\n"
- hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n"
- linkTag = "\n\\[la]"
- linkCloseTag = "\\[ra]"
- codespanTag = "\\fB\\fC"
- codespanCloseTag = "\\fR"
- codeTag = "\n.PP\n.RS\n\n.nf\n"
- codeCloseTag = "\n.fi\n.RE\n"
- quoteTag = "\n.PP\n.RS\n"
- quoteCloseTag = "\n.RE\n"
- listTag = "\n.RS\n"
- listCloseTag = "\n.RE\n"
- dtTag = "\n.TP\n"
- dd2Tag = "\n"
- tableStart = "\n.TS\nallbox;\n"
- tableEnd = ".TE\n"
- tableCellStart = "T{\n"
- tableCellEnd = "\nT}\n"
-)
-
-// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents
-// from markdown
-func NewRoffRenderer() *roffRenderer { // nolint: golint
- var extensions blackfriday.Extensions
-
- extensions |= blackfriday.NoIntraEmphasis
- extensions |= blackfriday.Tables
- extensions |= blackfriday.FencedCode
- extensions |= blackfriday.SpaceHeadings
- extensions |= blackfriday.Footnotes
- extensions |= blackfriday.Titleblock
- extensions |= blackfriday.DefinitionLists
- return &roffRenderer{
- extensions: extensions,
- }
-}
-
-// GetExtensions returns the list of extensions used by this renderer implementation
-func (r *roffRenderer) GetExtensions() blackfriday.Extensions {
- return r.extensions
-}
-
-// RenderHeader handles outputting the header at document start
-func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) {
- // disable hyphenation
- out(w, ".nh\n")
-}
-
-// RenderFooter handles outputting the footer at the document end; the roff
-// renderer has no footer information
-func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) {
-}
-
-// RenderNode is called for each node in a markdown document; based on the node
-// type the equivalent roff output is sent to the writer
-func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
-
- var walkAction = blackfriday.GoToNext
-
- switch node.Type {
- case blackfriday.Text:
- escapeSpecialChars(w, node.Literal)
- case blackfriday.Softbreak:
- out(w, crTag)
- case blackfriday.Hardbreak:
- out(w, breakTag)
- case blackfriday.Emph:
- if entering {
- out(w, emphTag)
- } else {
- out(w, emphCloseTag)
- }
- case blackfriday.Strong:
- if entering {
- out(w, strongTag)
- } else {
- out(w, strongCloseTag)
- }
- case blackfriday.Link:
- if !entering {
- out(w, linkTag+string(node.LinkData.Destination)+linkCloseTag)
- }
- case blackfriday.Image:
- // ignore images
- walkAction = blackfriday.SkipChildren
- case blackfriday.Code:
- out(w, codespanTag)
- escapeSpecialChars(w, node.Literal)
- out(w, codespanCloseTag)
- case blackfriday.Document:
- break
- case blackfriday.Paragraph:
- // roff .PP markers break lists
- if r.listDepth > 0 {
- return blackfriday.GoToNext
- }
- if entering {
- out(w, paraTag)
- } else {
- out(w, crTag)
- }
- case blackfriday.BlockQuote:
- if entering {
- out(w, quoteTag)
- } else {
- out(w, quoteCloseTag)
- }
- case blackfriday.Heading:
- r.handleHeading(w, node, entering)
- case blackfriday.HorizontalRule:
- out(w, hruleTag)
- case blackfriday.List:
- r.handleList(w, node, entering)
- case blackfriday.Item:
- r.handleItem(w, node, entering)
- case blackfriday.CodeBlock:
- out(w, codeTag)
- escapeSpecialChars(w, node.Literal)
- out(w, codeCloseTag)
- case blackfriday.Table:
- r.handleTable(w, node, entering)
- case blackfriday.TableHead:
- case blackfriday.TableBody:
- case blackfriday.TableRow:
- // no action as cell entries do all the nroff formatting
- return blackfriday.GoToNext
- case blackfriday.TableCell:
- r.handleTableCell(w, node, entering)
- case blackfriday.HTMLSpan:
- // ignore other HTML tags
- default:
- fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String())
- }
- return walkAction
-}
-
-func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) {
- if entering {
- switch node.Level {
- case 1:
- if !r.firstHeader {
- out(w, titleHeader)
- r.firstHeader = true
- break
- }
- out(w, topLevelHeader)
- case 2:
- out(w, secondLevelHdr)
- default:
- out(w, otherHeader)
- }
- }
-}
-
-func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) {
- openTag := listTag
- closeTag := listCloseTag
- if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
- // tags for definition lists handled within Item node
- openTag = ""
- closeTag = ""
- }
- if entering {
- r.listDepth++
- if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
- r.listCounters = append(r.listCounters, 1)
- }
- out(w, openTag)
- } else {
- if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
- r.listCounters = r.listCounters[:len(r.listCounters)-1]
- }
- out(w, closeTag)
- r.listDepth--
- }
-}
-
-func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering bool) {
- if entering {
- if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
- out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1]))
- r.listCounters[len(r.listCounters)-1]++
- } else if node.ListFlags&blackfriday.ListTypeTerm != 0 {
- // DT (definition term): line just before DD (see below).
- out(w, dtTag)
- r.firstDD = true
- } else if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
- // DD (definition description): line that starts with ": ".
- //
- // We have to distinguish between the first DD and the
- // subsequent ones, as there should be no vertical
- // whitespace between the DT and the first DD.
- if r.firstDD {
- r.firstDD = false
- } else {
- out(w, dd2Tag)
- }
- } else {
- out(w, ".IP \\(bu 2\n")
- }
- } else {
- out(w, "\n")
- }
-}
-
-func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) {
- if entering {
- out(w, tableStart)
- // call walker to count cells (and rows?) so format section can be produced
- columns := countColumns(node)
- out(w, strings.Repeat("l ", columns)+"\n")
- out(w, strings.Repeat("l ", columns)+".\n")
- } else {
- out(w, tableEnd)
- }
-}
-
-func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) {
- if entering {
- var start string
- if node.Prev != nil && node.Prev.Type == blackfriday.TableCell {
- start = "\t"
- }
- if node.IsHeader {
- start += codespanTag
- } else if nodeLiteralSize(node) > 30 {
- start += tableCellStart
- }
- out(w, start)
- } else {
- var end string
- if node.IsHeader {
- end = codespanCloseTag
- } else if nodeLiteralSize(node) > 30 {
- end = tableCellEnd
- }
- if node.Next == nil && end != tableCellEnd {
- // Last cell: need to carriage return if we are at the end of the
- // header row and content isn't wrapped in a "tablecell"
- end += crTag
- }
- out(w, end)
- }
-}
-
-func nodeLiteralSize(node *blackfriday.Node) int {
- total := 0
- for n := node.FirstChild; n != nil; n = n.FirstChild {
- total += len(n.Literal)
- }
- return total
-}
-
-// because roff format requires knowing the column count before outputting any table
-// data we need to walk a table tree and count the columns
-func countColumns(node *blackfriday.Node) int {
- var columns int
-
- node.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
- switch node.Type {
- case blackfriday.TableRow:
- if !entering {
- return blackfriday.Terminate
- }
- case blackfriday.TableCell:
- if entering {
- columns++
- }
- default:
- }
- return blackfriday.GoToNext
- })
- return columns
-}
-
-func out(w io.Writer, output string) {
- io.WriteString(w, output) // nolint: errcheck
-}
-
-func escapeSpecialChars(w io.Writer, text []byte) {
- for i := 0; i < len(text); i++ {
- // escape initial apostrophe or period
- if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') {
- out(w, "\\&")
- }
-
- // directly copy normal characters
- org := i
-
- for i < len(text) && text[i] != '\\' {
- i++
- }
- if i > org {
- w.Write(text[org:i]) // nolint: errcheck
- }
-
- // escape a character
- if i >= len(text) {
- break
- }
-
- w.Write([]byte{'\\', text[i]}) // nolint: errcheck
- }
-}
diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig
new file mode 100644
index 000000000..fad895851
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.editorconfig
@@ -0,0 +1,12 @@
+root = true
+
+[*.go]
+indent_style = tab
+indent_size = 4
+insert_final_newline = true
+
+[*.{yml,yaml}]
+indent_style = space
+indent_size = 2
+insert_final_newline = true
+trim_trailing_whitespace = true
diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes
new file mode 100644
index 000000000..32f1001be
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.gitattributes
@@ -0,0 +1 @@
+go.sum linguist-generated
diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore
new file mode 100644
index 000000000..4cd0cbaf4
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.gitignore
@@ -0,0 +1,6 @@
+# Setup a Global .gitignore for OS and editor generated files:
+# https://help.github.com/articles/ignoring-files
+# git config --global core.excludesfile ~/.gitignore_global
+
+.vagrant
+*.sublime-project
diff --git a/vendor/github.com/fsnotify/fsnotify/.mailmap b/vendor/github.com/fsnotify/fsnotify/.mailmap
new file mode 100644
index 000000000..a04f2907f
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.mailmap
@@ -0,0 +1,2 @@
+Chris Howey
+Nathan Youngman <4566+nathany@users.noreply.github.com>
diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS
new file mode 100644
index 000000000..6cbabe5ef
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS
@@ -0,0 +1,62 @@
+# Names should be added to this file as
+# Name or Organization
+# The email address is not required for organizations.
+
+# You can update this list using the following command:
+#
+# $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS
+
+# Please keep the list sorted.
+
+Aaron L
+Adrien Bustany
+Alexey Kazakov
+Amit Krishnan
+Anmol Sethi
+Bjørn Erik Pedersen
+Brian Goff
+Bruno Bigras
+Caleb Spare
+Case Nelson
+Chris Howey
+Christoffer Buchholz
+Daniel Wagner-Hall
+Dave Cheney
+Eric Lin
+Evan Phoenix
+Francisco Souza
+Gautam Dey
+Hari haran
+Ichinose Shogo
+Johannes Ebke
+John C Barstow
+Kelvin Fo
+Ken-ichirou MATSUZAWA
+Matt Layher
+Matthias Stone
+Nathan Youngman
+Nickolai Zeldovich
+Oliver Bristow
+Patrick
+Paul Hammond
+Pawel Knap
+Pieter Droogendijk
+Pratik Shinde
+Pursuit92
+Riku Voipio
+Rob Figueiredo
+Rodrigo Chiossi
+Slawek Ligus
+Soge Zhang
+Tiffany Jernigan
+Tilak Sharma
+Tobias Klauser
+Tom Payne
+Travis Cline
+Tudor Golubenco
+Vahe Khachikyan
+Yukang
+bronze1man
+debrando
+henrikedwards
+铁哥
diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
new file mode 100644
index 000000000..a438fe4b4
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
@@ -0,0 +1,339 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+
+## [1.5.1] - 2021-08-24
+
+* Revert Add AddRaw to not follow symlinks
+
+## [1.5.0] - 2021-08-20
+
+* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381)
+* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298)
+* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289)
+* CI: Use GitHub Actions for CI and cover go 1.12-1.17
+ [#378](https://github.com/fsnotify/fsnotify/pull/378)
+ [#381](https://github.com/fsnotify/fsnotify/pull/381)
+ [#385](https://github.com/fsnotify/fsnotify/pull/385)
+* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325)
+
+## [1.4.7] - 2018-01-09
+
+* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
+* Tests: Fix missing verb on format string (thanks @rchiossi)
+* Linux: Fix deadlock in Remove (thanks @aarondl)
+* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
+* Docs: Moved FAQ into the README (thanks @vahe)
+* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
+* Docs: replace references to OS X with macOS
+
+## [1.4.2] - 2016-10-10
+
+* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
+
+## [1.4.1] - 2016-10-04
+
+* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
+
+## [1.4.0] - 2016-10-01
+
+* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
+
+## [1.3.1] - 2016-06-28
+
+* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
+
+## [1.3.0] - 2016-04-19
+
+* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
+
+## [1.2.10] - 2016-03-02
+
+* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
+
+## [1.2.9] - 2016-01-13
+
+* kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
+
+## [1.2.8] - 2015-12-17
+
+* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
+* inotify: fix race in test
+* enable race detection for continuous integration (Linux, Mac, Windows)
+
+## [1.2.5] - 2015-10-17
+
+* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
+* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
+* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
+* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
+
+## [1.2.1] - 2015-10-14
+
+* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
+
+## [1.2.0] - 2015-02-08
+
+* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
+* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
+* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
+
+## [1.1.1] - 2015-02-05
+
+* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
+
+## [1.1.0] - 2014-12-12
+
+* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
+ * add low-level functions
+ * only need to store flags on directories
+ * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
+ * done can be an unbuffered channel
+ * remove calls to os.NewSyscallError
+* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
+* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## [1.0.4] - 2014-09-07
+
+* kqueue: add dragonfly to the build tags.
+* Rename source code files, rearrange code so exported APIs are at the top.
+* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
+
+## [1.0.3] - 2014-08-19
+
+* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
+
+## [1.0.2] - 2014-08-17
+
+* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+* [Fix] Make ./path and path equivalent. (thanks @zhsso)
+
+## [1.0.0] - 2014-08-15
+
+* [API] Remove AddWatch on Windows, use Add.
+* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
+* Minor updates based on feedback from golint.
+
+## dev / 2014-07-09
+
+* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
+* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
+
+## dev / 2014-07-04
+
+* kqueue: fix incorrect mutex used in Close()
+* Update example to demonstrate usage of Op.
+
+## dev / 2014-06-28
+
+* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
+* Fix for String() method on Event (thanks Alex Brainman)
+* Don't build on Plan 9 or Solaris (thanks @4ad)
+
+## dev / 2014-06-21
+
+* Events channel of type Event rather than *Event.
+* [internal] use syscall constants directly for inotify and kqueue.
+* [internal] kqueue: rename events to kevents and fileEvent to event.
+
+## dev / 2014-06-19
+
+* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
+* [internal] remove cookie from Event struct (unused).
+* [internal] Event struct has the same definition across every OS.
+* [internal] remove internal watch and removeWatch methods.
+
+## dev / 2014-06-12
+
+* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
+* [API] Pluralized channel names: Events and Errors.
+* [API] Renamed FileEvent struct to Event.
+* [API] Op constants replace methods like IsCreate().
+
+## dev / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## dev / 2014-05-23
+
+* [API] Remove current implementation of WatchFlags.
+ * current implementation doesn't take advantage of OS for efficiency
+ * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
+ * no tests for the current implementation
+ * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
+
+## [0.9.3] - 2014-12-31
+
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## [0.9.2] - 2014-08-17
+
+* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+
+## [0.9.1] - 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## [0.9.0] - 2014-01-17
+
+* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
+* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
+* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
+
+## [0.8.12] - 2013-11-13
+
+* [API] Remove FD_SET and friends from Linux adapter
+
+## [0.8.11] - 2013-11-02
+
+* [Doc] Add Changelog [#72][] (thanks @nathany)
+* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
+
+## [0.8.10] - 2013-10-19
+
+* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
+* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
+* [Doc] specify OS-specific limits in README (thanks @debrando)
+
+## [0.8.9] - 2013-09-08
+
+* [Doc] Contributing (thanks @nathany)
+* [Doc] update package path in example code [#63][] (thanks @paulhammond)
+* [Doc] GoCI badge in README (Linux only) [#60][]
+* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
+
+## [0.8.8] - 2013-06-17
+
+* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
+
+## [0.8.7] - 2013-06-03
+
+* [API] Make syscall flags internal
+* [Fix] inotify: ignore event changes
+* [Fix] race in symlink test [#45][] (reported by @srid)
+* [Fix] tests on Windows
+* lower case error messages
+
+## [0.8.6] - 2013-05-23
+
+* kqueue: Use EVT_ONLY flag on Darwin
+* [Doc] Update README with full example
+
+## [0.8.5] - 2013-05-09
+
+* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
+
+## [0.8.4] - 2013-04-07
+
+* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
+
+## [0.8.3] - 2013-03-13
+
+* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin)
+* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
+
+## [0.8.2] - 2013-02-07
+
+* [Doc] add Authors
+* [Fix] fix data races for map access [#29][] (thanks @fsouza)
+
+## [0.8.1] - 2013-01-09
+
+* [Fix] Windows path separators
+* [Doc] BSD License
+
+## [0.8.0] - 2012-11-09
+
+* kqueue: directory watching improvements (thanks @vmirage)
+* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
+* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
+
+## [0.7.4] - 2012-10-09
+
+* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
+* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
+* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
+* [Fix] kqueue: modify after recreation of file
+
+## [0.7.3] - 2012-09-27
+
+* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
+* [Fix] kqueue: no longer get duplicate CREATE events
+
+## [0.7.2] - 2012-09-01
+
+* kqueue: events for created directories
+
+## [0.7.1] - 2012-07-14
+
+* [Fix] for renaming files
+
+## [0.7.0] - 2012-07-02
+
+* [Feature] FSNotify flags
+* [Fix] inotify: Added file name back to event path
+
+## [0.6.0] - 2012-06-06
+
+* kqueue: watch files after directory created (thanks @tmc)
+
+## [0.5.1] - 2012-05-22
+
+* [Fix] inotify: remove all watches before Close()
+
+## [0.5.0] - 2012-05-03
+
+* [API] kqueue: return errors during watch instead of sending over channel
+* kqueue: match symlink behavior on Linux
+* inotify: add `DELETE_SELF` (requested by @taralx)
+* [Fix] kqueue: handle EINTR (reported by @robfig)
+* [Doc] Godoc example [#1][] (thanks @davecheney)
+
+## [0.4.0] - 2012-03-30
+
+* Go 1 released: build with go tool
+* [Feature] Windows support using winfsnotify
+* Windows does not have attribute change notifications
+* Roll attribute notifications into IsModify
+
+## [0.3.0] - 2012-02-19
+
+* kqueue: add files when watch directory
+
+## [0.2.0] - 2011-12-30
+
+* update to latest Go weekly code
+
+## [0.1.0] - 2011-10-19
+
+* kqueue: add watch on file creation to match inotify
+* kqueue: create file event
+* inotify: ignore `IN_IGNORED` events
+* event String()
+* linux: common FileEvent functions
+* initial commit
+
+[#79]: https://github.com/howeyc/fsnotify/pull/79
+[#77]: https://github.com/howeyc/fsnotify/pull/77
+[#72]: https://github.com/howeyc/fsnotify/issues/72
+[#71]: https://github.com/howeyc/fsnotify/issues/71
+[#70]: https://github.com/howeyc/fsnotify/issues/70
+[#63]: https://github.com/howeyc/fsnotify/issues/63
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#60]: https://github.com/howeyc/fsnotify/issues/60
+[#59]: https://github.com/howeyc/fsnotify/issues/59
+[#49]: https://github.com/howeyc/fsnotify/issues/49
+[#45]: https://github.com/howeyc/fsnotify/issues/45
+[#40]: https://github.com/howeyc/fsnotify/issues/40
+[#36]: https://github.com/howeyc/fsnotify/issues/36
+[#33]: https://github.com/howeyc/fsnotify/issues/33
+[#29]: https://github.com/howeyc/fsnotify/issues/29
+[#25]: https://github.com/howeyc/fsnotify/issues/25
+[#24]: https://github.com/howeyc/fsnotify/issues/24
+[#21]: https://github.com/howeyc/fsnotify/issues/21
diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
new file mode 100644
index 000000000..828a60b24
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
@@ -0,0 +1,77 @@
+# Contributing
+
+## Issues
+
+* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
+* Please indicate the platform you are using fsnotify on.
+* A code example to reproduce the problem is appreciated.
+
+## Pull Requests
+
+### Contributor License Agreement
+
+fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
+
+Please indicate that you have signed the CLA in your pull request.
+
+### How fsnotify is Developed
+
+* Development is done on feature branches.
+* Tests are run on BSD, Linux, macOS and Windows.
+* Pull requests are reviewed and [applied to master][am] using [hub][].
+ * Maintainers may modify or squash commits rather than asking contributors to.
+* To issue a new release, the maintainers will:
+ * Update the CHANGELOG
+ * Tag a version, which will become available through gopkg.in.
+
+### How to Fork
+
+For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
+
+1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Ensure everything works and the tests pass (see below)
+4. Commit your changes (`git commit -am 'Add some feature'`)
+
+Contribute upstream:
+
+1. Fork fsnotify on GitHub
+2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
+3. Push to the branch (`git push fork my-new-feature`)
+4. Create a new Pull Request on GitHub
+
+This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
+
+### Testing
+
+fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
+
+Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
+
+To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
+
+* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
+* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
+* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
+* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
+* When you're done, you will want to halt or destroy the Vagrant boxes.
+
+Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
+
+Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
+
+### Maintainers
+
+Help maintaining fsnotify is welcome. To be a maintainer:
+
+* Submit a pull request and sign the CLA as above.
+* You must be able to run the test suite on Mac, Windows, Linux and BSD.
+
+To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
+
+All code changes should be internal pull requests.
+
+Releases are tagged using [Semantic Versioning](http://semver.org/).
+
+[hub]: https://github.com/github/hub
+[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs
diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE
new file mode 100644
index 000000000..e180c8fb0
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2012-2019 fsnotify Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md
new file mode 100644
index 000000000..df57b1b28
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/README.md
@@ -0,0 +1,130 @@
+# File system notifications for Go
+
+[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
+
+fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
+
+```console
+go get -u golang.org/x/sys/...
+```
+
+Cross platform: Windows, Linux, BSD and macOS.
+
+| Adapter | OS | Status |
+| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- |
+| inotify | Linux 2.6.27 or later, Android\* | Supported |
+| kqueue | BSD, macOS, iOS\* | Supported |
+| ReadDirectoryChangesW | Windows | Supported |
+| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) |
+| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) |
+| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) |
+| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) |
+| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) |
+
+\* Android and iOS are untested.
+
+Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
+
+## API stability
+
+fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
+
+All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
+
+Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
+
+## Usage
+
+```go
+package main
+
+import (
+ "log"
+
+ "github.com/fsnotify/fsnotify"
+)
+
+func main() {
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer watcher.Close()
+
+ done := make(chan bool)
+ go func() {
+ for {
+ select {
+ case event, ok := <-watcher.Events:
+ if !ok {
+ return
+ }
+ log.Println("event:", event)
+ if event.Op&fsnotify.Write == fsnotify.Write {
+ log.Println("modified file:", event.Name)
+ }
+ case err, ok := <-watcher.Errors:
+ if !ok {
+ return
+ }
+ log.Println("error:", err)
+ }
+ }
+ }()
+
+ err = watcher.Add("/tmp/foo")
+ if err != nil {
+ log.Fatal(err)
+ }
+ <-done
+}
+```
+
+## Contributing
+
+Please refer to [CONTRIBUTING][] before opening an issue or pull request.
+
+## Example
+
+See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
+
+## FAQ
+
+**When a file is moved to another directory is it still being watched?**
+
+No (it shouldn't be, unless you are watching where it was moved to).
+
+**When I watch a directory, are all subdirectories watched as well?**
+
+No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
+
+**Do I have to watch the Error and Event channels in a separate goroutine?**
+
+As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7])
+
+**Why am I receiving multiple events for the same file on OS X?**
+
+Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
+
+**How many files can be watched at once?**
+
+There are OS-specific limits as to how many watches can be created:
+* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit; reaching this limit results in a "no space left on device" error.
+* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc"; reaching these limits results in a "too many open files" error.
+
+**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?**
+
+fsnotify requires support from the underlying OS to work. The current NFS protocol does not provide network-level support for file notifications.
+
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#18]: https://github.com/fsnotify/fsnotify/issues/18
+[#11]: https://github.com/fsnotify/fsnotify/issues/11
+[#7]: https://github.com/howeyc/fsnotify/issues/7
+
+[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
+
+## Related Projects
+
+* [notify](https://github.com/rjeczalik/notify)
+* [fsevents](https://github.com/fsnotify/fsevents)
+
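
fsnotify lands in the vendor tree as a transitive dependency of viper: viper's optional config-file watching is built on an fsnotify watcher. A minimal sketch of that combination, assuming a hypothetical `config.yaml` path (gotosocial does not necessarily enable watching in this PR):

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
	"github.com/spf13/viper"
)

func main() {
	viper.SetConfigFile("config.yaml") // hypothetical path, for illustration only

	if err := viper.ReadInConfig(); err != nil {
		log.Fatal(err)
	}

	// Register the callback before starting the watcher; viper hands the
	// callback the raw fsnotify.Event for the changed file.
	viper.OnConfigChange(func(e fsnotify.Event) {
		log.Println("config file changed:", e.Name, e.Op)
	})
	viper.WatchConfig()

	select {} // block forever so the watcher goroutine keeps running
}
```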
diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go
new file mode 100644
index 000000000..b3ac3d8f5
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fen.go
@@ -0,0 +1,38 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build solaris
+// +build solaris
+
+package fsnotify
+
+import (
+ "errors"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ return nil
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
new file mode 100644
index 000000000..0f4ee52e8
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
@@ -0,0 +1,69 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !plan9
+// +build !plan9
+
+// Package fsnotify provides a platform-independent interface for file system notifications.
+package fsnotify
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
+// Event represents a single file system notification.
+type Event struct {
+ Name string // Relative path to the file or directory.
+ Op Op // File operation that triggered the event.
+}
+
+// Op describes a set of file operations.
+type Op uint32
+
+// These are the generalized file operations that can trigger a notification.
+const (
+ Create Op = 1 << iota
+ Write
+ Remove
+ Rename
+ Chmod
+)
+
+func (op Op) String() string {
+ // Use a buffer for efficient string concatenation
+ var buffer bytes.Buffer
+
+ if op&Create == Create {
+ buffer.WriteString("|CREATE")
+ }
+ if op&Remove == Remove {
+ buffer.WriteString("|REMOVE")
+ }
+ if op&Write == Write {
+ buffer.WriteString("|WRITE")
+ }
+ if op&Rename == Rename {
+ buffer.WriteString("|RENAME")
+ }
+ if op&Chmod == Chmod {
+ buffer.WriteString("|CHMOD")
+ }
+ if buffer.Len() == 0 {
+ return ""
+ }
+ return buffer.String()[1:] // Strip leading pipe
+}
+
+// String returns a string representation of the event in the form
+// "file: REMOVE|WRITE|..."
+func (e Event) String() string {
+ return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
+}
+
+// Common errors that can be reported by a watcher
+var (
+ ErrEventOverflow = errors.New("fsnotify queue overflow")
+)
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go
new file mode 100644
index 000000000..eb87699b5
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/inotify.go
@@ -0,0 +1,338 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux
+// +build linux
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ mu sync.Mutex // Map access
+ fd int
+ poller *fdPoller
+ watches map[string]*watch // Map of inotify watches (key: path)
+ paths map[int]string // Map of watched paths (key: watch descriptor)
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
+ doneResp chan struct{} // Channel to respond to Close
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ // Create inotify fd
+ fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
+ if fd == -1 {
+ return nil, errno
+ }
+ // Create epoll
+ poller, err := newFdPoller(fd)
+ if err != nil {
+ unix.Close(fd)
+ return nil, err
+ }
+ w := &Watcher{
+ fd: fd,
+ poller: poller,
+ watches: make(map[string]*watch),
+ paths: make(map[int]string),
+ Events: make(chan Event),
+ Errors: make(chan error),
+ done: make(chan struct{}),
+ doneResp: make(chan struct{}),
+ }
+
+ go w.readEvents()
+ return w, nil
+}
+
+func (w *Watcher) isClosed() bool {
+ select {
+ case <-w.done:
+ return true
+ default:
+ return false
+ }
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ if w.isClosed() {
+ return nil
+ }
+
+ // Send 'close' signal to goroutine, and set the Watcher to closed.
+ close(w.done)
+
+ // Wake up goroutine
+ w.poller.wake()
+
+ // Wait for goroutine to close
+ <-w.doneResp
+
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ name = filepath.Clean(name)
+ if w.isClosed() {
+ return errors.New("inotify instance already closed")
+ }
+
+ const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
+ unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
+ unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
+
+ var flags uint32 = agnosticEvents
+
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ watchEntry := w.watches[name]
+ if watchEntry != nil {
+ flags |= watchEntry.flags | unix.IN_MASK_ADD
+ }
+ wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
+ if wd == -1 {
+ return errno
+ }
+
+ if watchEntry == nil {
+ w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+ w.paths[wd] = name
+ } else {
+ watchEntry.wd = uint32(wd)
+ watchEntry.flags = flags
+ }
+
+ return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ name = filepath.Clean(name)
+
+ // Fetch the watch.
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ watch, ok := w.watches[name]
+
+ // Remove it from inotify.
+ if !ok {
+ return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
+ }
+
+ // We successfully removed the watch if InotifyRmWatch doesn't return an
+	// error; we need to clean up our internal state to ensure it matches
+ // inotify's kernel state.
+ delete(w.paths, int(watch.wd))
+ delete(w.watches, name)
+
+ // inotify_rm_watch will return EINVAL if the file has been deleted;
+ // the inotify will already have been removed.
+	// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
+ // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
+ // so that EINVAL means that the wd is being rm_watch()ed or its file removed
+ // by another thread and we have not received IN_IGNORE event.
+ success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
+ if success == -1 {
+ // TODO: Perhaps it's not helpful to return an error here in every case.
+ // the only two possible errors are:
+ // EBADF, which happens when w.fd is not a valid file descriptor of any kind.
+ // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
+ // Watch descriptors are invalidated when they are removed explicitly or implicitly;
+ // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
+ return errno
+ }
+
+ return nil
+}
+
+type watch struct {
+ wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+ flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+}
+
+// readEvents reads from the inotify file descriptor, converts the
+// received events into Event objects and sends them via the Events channel
+func (w *Watcher) readEvents() {
+ var (
+ buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
+ n int // Number of bytes read with read()
+ errno error // Syscall errno
+ ok bool // For poller.wait
+ )
+
+ defer close(w.doneResp)
+ defer close(w.Errors)
+ defer close(w.Events)
+ defer unix.Close(w.fd)
+ defer w.poller.close()
+
+ for {
+ // See if we have been closed.
+ if w.isClosed() {
+ return
+ }
+
+ ok, errno = w.poller.wait()
+ if errno != nil {
+ select {
+ case w.Errors <- errno:
+ case <-w.done:
+ return
+ }
+ continue
+ }
+
+ if !ok {
+ continue
+ }
+
+ n, errno = unix.Read(w.fd, buf[:])
+ // If a signal interrupted execution, see if we've been asked to close, and try again.
+ // http://man7.org/linux/man-pages/man7/signal.7.html :
+ // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
+ if errno == unix.EINTR {
+ continue
+ }
+
+ // unix.Read might have been woken up by Close. If so, we're done.
+ if w.isClosed() {
+ return
+ }
+
+ if n < unix.SizeofInotifyEvent {
+ var err error
+ if n == 0 {
+ // If EOF is received. This should really never happen.
+ err = io.EOF
+ } else if n < 0 {
+ // If an error occurred while reading.
+ err = errno
+ } else {
+ // Read was too short.
+ err = errors.New("notify: short read in readEvents()")
+ }
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ return
+ }
+ continue
+ }
+
+ var offset uint32
+ // We don't know how many events we just read into the buffer
+ // While the offset points to at least one whole event...
+ for offset <= uint32(n-unix.SizeofInotifyEvent) {
+ // Point "raw" to the event in the buffer
+ raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+
+ mask := uint32(raw.Mask)
+ nameLen := uint32(raw.Len)
+
+ if mask&unix.IN_Q_OVERFLOW != 0 {
+ select {
+ case w.Errors <- ErrEventOverflow:
+ case <-w.done:
+ return
+ }
+ }
+
+ // If the event happened to the watched directory or the watched file, the kernel
+ // doesn't append the filename to the event, but we would like to always fill the
+			// "Name" field with a valid filename. We retrieve the path of the watch from
+ // the "paths" map.
+ w.mu.Lock()
+ name, ok := w.paths[int(raw.Wd)]
+ // IN_DELETE_SELF occurs when the file/directory being watched is removed.
+ // This is a sign to clean up the maps, otherwise we are no longer in sync
+ // with the inotify kernel state which has already deleted the watch
+ // automatically.
+ if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+ delete(w.paths, int(raw.Wd))
+ delete(w.watches, name)
+ }
+ w.mu.Unlock()
+
+ if nameLen > 0 {
+ // Point "bytes" at the first byte of the filename
+ bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
+ // The filename is padded with NULL bytes. TrimRight() gets rid of those.
+ name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+ }
+
+ event := newEvent(name, mask)
+
+ // Send the events that are not ignored on the events channel
+ if !event.ignoreLinux(mask) {
+ select {
+ case w.Events <- event:
+ case <-w.done:
+ return
+ }
+ }
+
+ // Move to the next event in the buffer
+ offset += unix.SizeofInotifyEvent + nameLen
+ }
+ }
+}
+
+// Certain types of events can be "ignored" and not sent over the Events
+// channel. Such as events marked ignore by the kernel, or MODIFY events
+// against files that do not exist.
+func (e *Event) ignoreLinux(mask uint32) bool {
+ // Ignore anything the inotify API says to ignore
+ if mask&unix.IN_IGNORED == unix.IN_IGNORED {
+ return true
+ }
+
+ // If the event is not a DELETE or RENAME, the file must exist.
+ // Otherwise the event is ignored.
+ // *Note*: this was put in place because it was seen that a MODIFY
+ // event was sent after the DELETE. This ignores that MODIFY and
+ // assumes a DELETE will come or has come if the file doesn't exist.
+ if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
+ _, statErr := os.Lstat(e.Name)
+ return os.IsNotExist(statErr)
+ }
+ return false
+}
+
+// newEvent returns a platform-independent Event based on an inotify mask.
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
+ e.Op |= Create
+ }
+ if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
+ e.Op |= Remove
+ }
+ if mask&unix.IN_MODIFY == unix.IN_MODIFY {
+ e.Op |= Write
+ }
+ if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
+ e.Op |= Rename
+ }
+ if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
new file mode 100644
index 000000000..e9ff9439f
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
@@ -0,0 +1,188 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux
+// +build linux
+
+package fsnotify
+
+import (
+ "errors"
+
+ "golang.org/x/sys/unix"
+)
+
+type fdPoller struct {
+ fd int // File descriptor (as returned by the inotify_init() syscall)
+ epfd int // Epoll file descriptor
+ pipe [2]int // Pipe for waking up
+}
+
+func emptyPoller(fd int) *fdPoller {
+ poller := new(fdPoller)
+ poller.fd = fd
+ poller.epfd = -1
+ poller.pipe[0] = -1
+ poller.pipe[1] = -1
+ return poller
+}
+
+// Create a new inotify poller.
+// This creates an inotify handler, and an epoll handler.
+func newFdPoller(fd int) (*fdPoller, error) {
+ var errno error
+ poller := emptyPoller(fd)
+ defer func() {
+ if errno != nil {
+ poller.close()
+ }
+ }()
+ poller.fd = fd
+
+ // Create epoll fd
+ poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC)
+ if poller.epfd == -1 {
+ return nil, errno
+ }
+ // Create pipe; pipe[0] is the read end, pipe[1] the write end.
+ errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC)
+ if errno != nil {
+ return nil, errno
+ }
+
+ // Register inotify fd with epoll
+ event := unix.EpollEvent{
+ Fd: int32(poller.fd),
+ Events: unix.EPOLLIN,
+ }
+ errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
+ if errno != nil {
+ return nil, errno
+ }
+
+ // Register pipe fd with epoll
+ event = unix.EpollEvent{
+ Fd: int32(poller.pipe[0]),
+ Events: unix.EPOLLIN,
+ }
+ errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
+ if errno != nil {
+ return nil, errno
+ }
+
+ return poller, nil
+}
+
+// Wait using epoll.
+// Returns true if something is ready to be read,
+// false if there is not.
+func (poller *fdPoller) wait() (bool, error) {
+ // 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
+ // I don't know whether epoll_wait returns the number of events returned,
+ // or the total number of events ready.
+ // I decided to catch both by making the buffer one larger than the maximum.
+ events := make([]unix.EpollEvent, 7)
+ for {
+ n, errno := unix.EpollWait(poller.epfd, events, -1)
+ if n == -1 {
+ if errno == unix.EINTR {
+ continue
+ }
+ return false, errno
+ }
+ if n == 0 {
+ // If there are no events, try again.
+ continue
+ }
+ if n > 6 {
+ // This should never happen. More events were returned than should be possible.
+ return false, errors.New("epoll_wait returned more events than I know what to do with")
+ }
+ ready := events[:n]
+ epollhup := false
+ epollerr := false
+ epollin := false
+ for _, event := range ready {
+ if event.Fd == int32(poller.fd) {
+ if event.Events&unix.EPOLLHUP != 0 {
+ // This should not happen, but if it does, treat it as a wakeup.
+ epollhup = true
+ }
+ if event.Events&unix.EPOLLERR != 0 {
+ // If an error is waiting on the file descriptor, we should pretend
+ // something is ready to read, and let unix.Read pick up the error.
+ epollerr = true
+ }
+ if event.Events&unix.EPOLLIN != 0 {
+ // There is data to read.
+ epollin = true
+ }
+ }
+ if event.Fd == int32(poller.pipe[0]) {
+ if event.Events&unix.EPOLLHUP != 0 {
+ // Write pipe descriptor was closed, by us. This means we're closing down the
+ // watcher, and we should wake up.
+ }
+ if event.Events&unix.EPOLLERR != 0 {
+ // If an error is waiting on the pipe file descriptor.
+ // This is an absolute mystery, and should never ever happen.
+ return false, errors.New("Error on the pipe descriptor.")
+ }
+ if event.Events&unix.EPOLLIN != 0 {
+ // This is a regular wakeup, so we have to clear the buffer.
+ err := poller.clearWake()
+ if err != nil {
+ return false, err
+ }
+ }
+ }
+ }
+
+ if epollhup || epollerr || epollin {
+ return true, nil
+ }
+ return false, nil
+ }
+}
+
+// Close the write end of the poller.
+func (poller *fdPoller) wake() error {
+ buf := make([]byte, 1)
+ n, errno := unix.Write(poller.pipe[1], buf)
+ if n == -1 {
+ if errno == unix.EAGAIN {
+ // Buffer is full, poller will wake.
+ return nil
+ }
+ return errno
+ }
+ return nil
+}
+
+func (poller *fdPoller) clearWake() error {
+ // You have to be woken up a LOT in order to get to 100!
+ buf := make([]byte, 100)
+ n, errno := unix.Read(poller.pipe[0], buf)
+ if n == -1 {
+ if errno == unix.EAGAIN {
+ // Buffer is empty, someone else cleared our wake.
+ return nil
+ }
+ return errno
+ }
+ return nil
+}
+
+// Close all poller file descriptors, but not the one passed to it.
+func (poller *fdPoller) close() {
+ if poller.pipe[1] != -1 {
+ unix.Close(poller.pipe[1])
+ }
+ if poller.pipe[0] != -1 {
+ unix.Close(poller.pipe[0])
+ }
+ if poller.epfd != -1 {
+ unix.Close(poller.epfd)
+ }
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go
new file mode 100644
index 000000000..368f5b790
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go
@@ -0,0 +1,522 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build freebsd || openbsd || netbsd || dragonfly || darwin
+// +build freebsd openbsd netbsd dragonfly darwin
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
+
+ kq int // File descriptor (as returned by the kqueue() syscall).
+
+ mu sync.Mutex // Protects access to watcher data
+ watches map[string]int // Map of watched file descriptors (key: path).
+ externalWatches map[string]bool // Map of watches added by user of the library.
+ dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue.
+ paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events.
+ fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events).
+ isClosed bool // Set to true when Close() is first called
+}
+
+type pathInfo struct {
+ name string
+ isDir bool
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ kq, err := kqueue()
+ if err != nil {
+ return nil, err
+ }
+
+ w := &Watcher{
+ kq: kq,
+ watches: make(map[string]int),
+ dirFlags: make(map[string]uint32),
+ paths: make(map[int]pathInfo),
+ fileExists: make(map[string]bool),
+ externalWatches: make(map[string]bool),
+ Events: make(chan Event),
+ Errors: make(chan error),
+ done: make(chan struct{}),
+ }
+
+ go w.readEvents()
+ return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return nil
+ }
+ w.isClosed = true
+
+ // copy paths to remove while locked
+ var pathsToRemove = make([]string, 0, len(w.watches))
+ for name := range w.watches {
+ pathsToRemove = append(pathsToRemove, name)
+ }
+ w.mu.Unlock()
+ // unlock before calling Remove, which also locks
+
+ for _, name := range pathsToRemove {
+ w.Remove(name)
+ }
+
+ // send a "quit" message to the reader goroutine
+ close(w.done)
+
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ w.mu.Lock()
+ w.externalWatches[name] = true
+ w.mu.Unlock()
+ _, err := w.addWatch(name, noteAllEvents)
+ return err
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ name = filepath.Clean(name)
+ w.mu.Lock()
+ watchfd, ok := w.watches[name]
+ w.mu.Unlock()
+ if !ok {
+ return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
+ }
+
+ const registerRemove = unix.EV_DELETE
+ if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
+ return err
+ }
+
+ unix.Close(watchfd)
+
+ w.mu.Lock()
+ isDir := w.paths[watchfd].isDir
+ delete(w.watches, name)
+ delete(w.paths, watchfd)
+ delete(w.dirFlags, name)
+ w.mu.Unlock()
+
+ // Find all watched paths that are in this directory that are not external.
+ if isDir {
+ var pathsToRemove []string
+ w.mu.Lock()
+ for _, path := range w.paths {
+ wdir, _ := filepath.Split(path.name)
+ if filepath.Clean(wdir) == name {
+ if !w.externalWatches[path.name] {
+ pathsToRemove = append(pathsToRemove, path.name)
+ }
+ }
+ }
+ w.mu.Unlock()
+ for _, name := range pathsToRemove {
+ // Since these are internal, not much sense in propagating error
+ // to the user, as that will just confuse them with an error about
+ // a path they did not explicitly watch themselves.
+ w.Remove(name)
+ }
+ }
+
+ return nil
+}
+
+// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
+const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
+
+// keventWaitTime to block on each read from kevent
+var keventWaitTime = durationToTimespec(100 * time.Millisecond)
+
+// addWatch adds name to the watched file set.
+// The flags are interpreted as described in kevent(2).
+// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
+func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
+ var isDir bool
+ // Make ./name and name equivalent
+ name = filepath.Clean(name)
+
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return "", errors.New("kevent instance already closed")
+ }
+ watchfd, alreadyWatching := w.watches[name]
+ // We already have a watch, but we can still override flags.
+ if alreadyWatching {
+ isDir = w.paths[watchfd].isDir
+ }
+ w.mu.Unlock()
+
+ if !alreadyWatching {
+ fi, err := os.Lstat(name)
+ if err != nil {
+ return "", err
+ }
+
+ // Don't watch sockets.
+ if fi.Mode()&os.ModeSocket == os.ModeSocket {
+ return "", nil
+ }
+
+ // Don't watch named pipes.
+ if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
+ return "", nil
+ }
+
+ // Follow Symlinks
+ // Unfortunately, Linux can add bogus symlinks to watch list without
+ // issue, and Windows can't do symlinks period (AFAIK). To maintain
+ // consistency, we will act like everything is fine. There will simply
+ // be no file events for broken symlinks.
+ // Hence the returns of nil on errors.
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
+ name, err = filepath.EvalSymlinks(name)
+ if err != nil {
+ return "", nil
+ }
+
+ w.mu.Lock()
+ _, alreadyWatching = w.watches[name]
+ w.mu.Unlock()
+
+ if alreadyWatching {
+ return name, nil
+ }
+
+ fi, err = os.Lstat(name)
+ if err != nil {
+ return "", nil
+ }
+ }
+
+ watchfd, err = unix.Open(name, openMode, 0700)
+ if watchfd == -1 {
+ return "", err
+ }
+
+ isDir = fi.IsDir()
+ }
+
+ const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
+ if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
+ unix.Close(watchfd)
+ return "", err
+ }
+
+ if !alreadyWatching {
+ w.mu.Lock()
+ w.watches[name] = watchfd
+ w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
+ w.mu.Unlock()
+ }
+
+ if isDir {
+ // Watch the directory if it has not been watched before,
+ // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
+ w.mu.Lock()
+
+ watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
+ (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
+ // Store flags so this watch can be updated later
+ w.dirFlags[name] = flags
+ w.mu.Unlock()
+
+ if watchDir {
+ if err := w.watchDirectoryFiles(name); err != nil {
+ return "", err
+ }
+ }
+ }
+ return name, nil
+}
+
+// readEvents reads from kqueue and converts the received kevents into
+// Event values that it sends down the Events channel.
+func (w *Watcher) readEvents() {
+ eventBuffer := make([]unix.Kevent_t, 10)
+
+loop:
+ for {
+ // See if there is a message on the "done" channel
+ select {
+ case <-w.done:
+ break loop
+ default:
+ }
+
+ // Get new events
+ kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
+ // EINTR is okay, the syscall was interrupted before timeout expired.
+ if err != nil && err != unix.EINTR {
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ break loop
+ }
+ continue
+ }
+
+ // Flush the events we received to the Events channel
+ for len(kevents) > 0 {
+ kevent := &kevents[0]
+ watchfd := int(kevent.Ident)
+ mask := uint32(kevent.Fflags)
+ w.mu.Lock()
+ path := w.paths[watchfd]
+ w.mu.Unlock()
+ event := newEvent(path.name, mask)
+
+ if path.isDir && !(event.Op&Remove == Remove) {
+ // Double-check that the directory still exists. This can happen when
+ // we do an rm -fr on a recursively watched folder: a modification
+ // event may arrive first, even though the folder has already been
+ // deleted, with the delete event only arriving later.
+ if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
+ // mark it as a delete event
+ event.Op |= Remove
+ }
+ }
+
+ if event.Op&Rename == Rename || event.Op&Remove == Remove {
+ w.Remove(event.Name)
+ w.mu.Lock()
+ delete(w.fileExists, event.Name)
+ w.mu.Unlock()
+ }
+
+ if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
+ w.sendDirectoryChangeEvents(event.Name)
+ } else {
+ // Send the event on the Events channel.
+ select {
+ case w.Events <- event:
+ case <-w.done:
+ break loop
+ }
+ }
+
+ if event.Op&Remove == Remove {
+ // Look for a file that may have overwritten this.
+ // For example, mv f1 f2 will delete f2, then create f2.
+ if path.isDir {
+ fileDir := filepath.Clean(event.Name)
+ w.mu.Lock()
+ _, found := w.watches[fileDir]
+ w.mu.Unlock()
+ if found {
+ // make sure the directory exists before we watch for changes. When we
+ // do a recursive watch and perform rm -fr, the parent directory might
+ // have gone missing; ignore the missing directory and let the
+ // upcoming delete event remove the watch from the parent directory.
+ if _, err := os.Lstat(fileDir); err == nil {
+ w.sendDirectoryChangeEvents(fileDir)
+ }
+ }
+ } else {
+ filePath := filepath.Clean(event.Name)
+ if fileInfo, err := os.Lstat(filePath); err == nil {
+ w.sendFileCreatedEventIfNew(filePath, fileInfo)
+ }
+ }
+ }
+
+ // Move to next event
+ kevents = kevents[1:]
+ }
+ }
+
+ // cleanup
+ err := unix.Close(w.kq)
+ if err != nil {
+ // the only way the previous loop can break is if w.done was closed, so we need a non-blocking send to w.Errors.
+ select {
+ case w.Errors <- err:
+ default:
+ }
+ }
+ close(w.Events)
+ close(w.Errors)
+}
+
+// newEvent returns a platform-independent Event based on kqueue Fflags.
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
+ e.Op |= Remove
+ }
+ if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
+ e.Op |= Write
+ }
+ if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
+ e.Op |= Rename
+ }
+ if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
+
+func newCreateEvent(name string) Event {
+ return Event{Name: name, Op: Create}
+}
+
+// watchDirectoryFiles watches all files in a directory, to mimic inotify when adding a watch on a directory
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
+ // Get all files
+ files, err := ioutil.ReadDir(dirPath)
+ if err != nil {
+ return err
+ }
+
+ for _, fileInfo := range files {
+ filePath := filepath.Join(dirPath, fileInfo.Name())
+ filePath, err = w.internalWatch(filePath, fileInfo)
+ if err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ w.fileExists[filePath] = true
+ w.mu.Unlock()
+ }
+
+ return nil
+}
+
+// sendDirectoryChangeEvents searches the directory for newly created files
+// and sends them over the event channel. This allows the BSD version of
+// fsnotify to match Linux inotify, which provides a create event for files
+// created in a watched directory.
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
+ // Get all files
+ files, err := ioutil.ReadDir(dirPath)
+ if err != nil {
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ return
+ }
+ }
+
+ // Search for new files
+ for _, fileInfo := range files {
+ filePath := filepath.Join(dirPath, fileInfo.Name())
+ err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
+
+ if err != nil {
+ return
+ }
+ }
+}
+
+// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
+ w.mu.Lock()
+ _, doesExist := w.fileExists[filePath]
+ w.mu.Unlock()
+ if !doesExist {
+ // Send create event
+ select {
+ case w.Events <- newCreateEvent(filePath):
+ case <-w.done:
+ return
+ }
+ }
+
+ // like watchDirectoryFiles (but without doing another ReadDir)
+ filePath, err = w.internalWatch(filePath, fileInfo)
+ if err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ w.fileExists[filePath] = true
+ w.mu.Unlock()
+
+ return nil
+}
+
+func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
+ if fileInfo.IsDir() {
+ // mimic Linux providing delete events for subdirectories
+ // but preserve the flags used if currently watching subdirectory
+ w.mu.Lock()
+ flags := w.dirFlags[name]
+ w.mu.Unlock()
+
+ flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
+ return w.addWatch(name, flags)
+ }
+
+ // watch file to mimic Linux inotify
+ return w.addWatch(name, noteAllEvents)
+}
+
+// kqueue creates a new kernel event queue and returns a descriptor.
+func kqueue() (kq int, err error) {
+ kq, err = unix.Kqueue()
+ if kq == -1 {
+ return kq, err
+ }
+ return kq, nil
+}
+
+// register events with the queue
+func register(kq int, fds []int, flags int, fflags uint32) error {
+ changes := make([]unix.Kevent_t, len(fds))
+
+ for i, fd := range fds {
+ // SetKevent converts int to the platform-specific types:
+ unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
+ changes[i].Fflags = fflags
+ }
+
+ // register the events
+ success, err := unix.Kevent(kq, changes, nil, nil)
+ if success == -1 {
+ return err
+ }
+ return nil
+}
+
+// read retrieves pending events, or waits until an event occurs.
+// A timeout of nil blocks indefinitely, while 0 polls the queue.
+func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
+ n, err := unix.Kevent(kq, nil, events, timeout)
+ if err != nil {
+ return nil, err
+ }
+ return events[0:n], nil
+}
+
+// durationToTimespec prepares a timeout value
+func durationToTimespec(d time.Duration) unix.Timespec {
+ return unix.NsecToTimespec(d.Nanoseconds())
+}
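
A minimal usage sketch of the Watcher API vendored above (NewWatcher, Add, Close, and the Events/Errors channels). The import path matches the vendored package; the watched directory "/tmp/watched" is a placeholder chosen only for illustration.

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// NewWatcher starts the readEvents goroutine shown above.
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Watch a single directory (non-recursively).
	if err := w.Add("/tmp/watched"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case event, ok := <-w.Events:
			if !ok {
				return // channels are closed once Close() is called
			}
			log.Println("event:", event)
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		}
	}
}
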
diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
new file mode 100644
index 000000000..36cc3845b
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
@@ -0,0 +1,12 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build freebsd || openbsd || netbsd || dragonfly
+// +build freebsd openbsd netbsd dragonfly
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC
diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
new file mode 100644
index 000000000..98cd8476f
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
@@ -0,0 +1,13 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin
+// +build darwin
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+// note: this constant is not defined on BSD
+const openMode = unix.O_EVTONLY | unix.O_CLOEXEC
diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go
new file mode 100644
index 000000000..c02b75f7c
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/windows.go
@@ -0,0 +1,562 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+// +build windows
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sync"
+ "syscall"
+ "unsafe"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ isClosed bool // Set to true when Close() is first called
+ mu sync.Mutex // Map access
+ port syscall.Handle // Handle to completion port
+ watches watchMap // Map of watches (key: i-number)
+ input chan *input // Inputs to the reader are sent on this channel
+ quit chan chan<- error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
+ if e != nil {
+ return nil, os.NewSyscallError("CreateIoCompletionPort", e)
+ }
+ w := &Watcher{
+ port: port,
+ watches: make(watchMap),
+ input: make(chan *input, 1),
+ Events: make(chan Event, 50),
+ Errors: make(chan error),
+ quit: make(chan chan<- error, 1),
+ }
+ go w.readEvents()
+ return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ if w.isClosed {
+ return nil
+ }
+ w.isClosed = true
+
+ // Send "quit" message to the reader goroutine
+ ch := make(chan error)
+ w.quit <- ch
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-ch
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ if w.isClosed {
+ return errors.New("watcher already closed")
+ }
+ in := &input{
+ op: opAddWatch,
+ path: filepath.Clean(name),
+ flags: sysFSALLEVENTS,
+ reply: make(chan error),
+ }
+ w.input <- in
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-in.reply
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ in := &input{
+ op: opRemoveWatch,
+ path: filepath.Clean(name),
+ reply: make(chan error),
+ }
+ w.input <- in
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-in.reply
+}
+
+const (
+ // Options for AddWatch
+ sysFSONESHOT = 0x80000000
+ sysFSONLYDIR = 0x1000000
+
+ // Events
+ sysFSACCESS = 0x1
+ sysFSALLEVENTS = 0xfff
+ sysFSATTRIB = 0x4
+ sysFSCLOSE = 0x18
+ sysFSCREATE = 0x100
+ sysFSDELETE = 0x200
+ sysFSDELETESELF = 0x400
+ sysFSMODIFY = 0x2
+ sysFSMOVE = 0xc0
+ sysFSMOVEDFROM = 0x40
+ sysFSMOVEDTO = 0x80
+ sysFSMOVESELF = 0x800
+
+ // Special events
+ sysFSIGNORED = 0x8000
+ sysFSQOVERFLOW = 0x4000
+)
+
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
+ e.Op |= Create
+ }
+ if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
+ e.Op |= Remove
+ }
+ if mask&sysFSMODIFY == sysFSMODIFY {
+ e.Op |= Write
+ }
+ if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
+ e.Op |= Rename
+ }
+ if mask&sysFSATTRIB == sysFSATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
+
+const (
+ opAddWatch = iota
+ opRemoveWatch
+)
+
+const (
+ provisional uint64 = 1 << (32 + iota)
+)
+
+type input struct {
+ op int
+ path string
+ flags uint32
+ reply chan error
+}
+
+type inode struct {
+ handle syscall.Handle
+ volume uint32
+ index uint64
+}
+
+type watch struct {
+ ov syscall.Overlapped
+ ino *inode // i-number
+ path string // Directory path
+ mask uint64 // Directory itself is being watched with these notify flags
+ names map[string]uint64 // Map of names being watched and their notify flags
+ rename string // Remembers the old name while renaming a file
+ buf [4096]byte
+}
+
+type indexMap map[uint64]*watch
+type watchMap map[uint32]indexMap
+
+func (w *Watcher) wakeupReader() error {
+ e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
+ if e != nil {
+ return os.NewSyscallError("PostQueuedCompletionStatus", e)
+ }
+ return nil
+}
+
+func getDir(pathname string) (dir string, err error) {
+ attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
+ if e != nil {
+ return "", os.NewSyscallError("GetFileAttributes", e)
+ }
+ if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
+ dir = pathname
+ } else {
+ dir, _ = filepath.Split(pathname)
+ dir = filepath.Clean(dir)
+ }
+ return
+}
+
+func getIno(path string) (ino *inode, err error) {
+ h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
+ syscall.FILE_LIST_DIRECTORY,
+ syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
+ nil, syscall.OPEN_EXISTING,
+ syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
+ if e != nil {
+ return nil, os.NewSyscallError("CreateFile", e)
+ }
+ var fi syscall.ByHandleFileInformation
+ if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
+ syscall.CloseHandle(h)
+ return nil, os.NewSyscallError("GetFileInformationByHandle", e)
+ }
+ ino = &inode{
+ handle: h,
+ volume: fi.VolumeSerialNumber,
+ index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
+ }
+ return ino, nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) get(ino *inode) *watch {
+ if i := m[ino.volume]; i != nil {
+ return i[ino.index]
+ }
+ return nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) set(ino *inode, watch *watch) {
+ i := m[ino.volume]
+ if i == nil {
+ i = make(indexMap)
+ m[ino.volume] = i
+ }
+ i[ino.index] = watch
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) addWatch(pathname string, flags uint64) error {
+ dir, err := getDir(pathname)
+ if err != nil {
+ return err
+ }
+ if flags&sysFSONLYDIR != 0 && pathname != dir {
+ return nil
+ }
+ ino, err := getIno(dir)
+ if err != nil {
+ return err
+ }
+ w.mu.Lock()
+ watchEntry := w.watches.get(ino)
+ w.mu.Unlock()
+ if watchEntry == nil {
+ if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
+ syscall.CloseHandle(ino.handle)
+ return os.NewSyscallError("CreateIoCompletionPort", e)
+ }
+ watchEntry = &watch{
+ ino: ino,
+ path: dir,
+ names: make(map[string]uint64),
+ }
+ w.mu.Lock()
+ w.watches.set(ino, watchEntry)
+ w.mu.Unlock()
+ flags |= provisional
+ } else {
+ syscall.CloseHandle(ino.handle)
+ }
+ if pathname == dir {
+ watchEntry.mask |= flags
+ } else {
+ watchEntry.names[filepath.Base(pathname)] |= flags
+ }
+ if err = w.startRead(watchEntry); err != nil {
+ return err
+ }
+ if pathname == dir {
+ watchEntry.mask &= ^provisional
+ } else {
+ watchEntry.names[filepath.Base(pathname)] &= ^provisional
+ }
+ return nil
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) remWatch(pathname string) error {
+ dir, err := getDir(pathname)
+ if err != nil {
+ return err
+ }
+ ino, err := getIno(dir)
+ if err != nil {
+ return err
+ }
+ w.mu.Lock()
+ watch := w.watches.get(ino)
+ w.mu.Unlock()
+ if watch == nil {
+ return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
+ }
+ if pathname == dir {
+ w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+ watch.mask = 0
+ } else {
+ name := filepath.Base(pathname)
+ w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
+ delete(watch.names, name)
+ }
+ return w.startRead(watch)
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) deleteWatch(watch *watch) {
+ for name, mask := range watch.names {
+ if mask&provisional == 0 {
+ w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
+ }
+ delete(watch.names, name)
+ }
+ if watch.mask != 0 {
+ if watch.mask&provisional == 0 {
+ w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+ }
+ watch.mask = 0
+ }
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) startRead(watch *watch) error {
+ if e := syscall.CancelIo(watch.ino.handle); e != nil {
+ w.Errors <- os.NewSyscallError("CancelIo", e)
+ w.deleteWatch(watch)
+ }
+ mask := toWindowsFlags(watch.mask)
+ for _, m := range watch.names {
+ mask |= toWindowsFlags(m)
+ }
+ if mask == 0 {
+ if e := syscall.CloseHandle(watch.ino.handle); e != nil {
+ w.Errors <- os.NewSyscallError("CloseHandle", e)
+ }
+ w.mu.Lock()
+ delete(w.watches[watch.ino.volume], watch.ino.index)
+ w.mu.Unlock()
+ return nil
+ }
+ e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
+ uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
+ if e != nil {
+ err := os.NewSyscallError("ReadDirectoryChanges", e)
+ if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
+ // Watched directory was probably removed
+ if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
+ if watch.mask&sysFSONESHOT != 0 {
+ watch.mask = 0
+ }
+ }
+ err = nil
+ }
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ return err
+ }
+ return nil
+}
+
+// readEvents reads from the I/O completion port, converts the
+// received events into Event objects and sends them via the Events channel.
+// Entry point to the I/O thread.
+func (w *Watcher) readEvents() {
+ var (
+ n, key uint32
+ ov *syscall.Overlapped
+ )
+ runtime.LockOSThread()
+
+ for {
+ e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
+ watch := (*watch)(unsafe.Pointer(ov))
+
+ if watch == nil {
+ select {
+ case ch := <-w.quit:
+ w.mu.Lock()
+ var indexes []indexMap
+ for _, index := range w.watches {
+ indexes = append(indexes, index)
+ }
+ w.mu.Unlock()
+ for _, index := range indexes {
+ for _, watch := range index {
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ }
+ }
+ var err error
+ if e := syscall.CloseHandle(w.port); e != nil {
+ err = os.NewSyscallError("CloseHandle", e)
+ }
+ close(w.Events)
+ close(w.Errors)
+ ch <- err
+ return
+ case in := <-w.input:
+ switch in.op {
+ case opAddWatch:
+ in.reply <- w.addWatch(in.path, uint64(in.flags))
+ case opRemoveWatch:
+ in.reply <- w.remWatch(in.path)
+ }
+ default:
+ }
+ continue
+ }
+
+ switch e {
+ case syscall.ERROR_MORE_DATA:
+ if watch == nil {
+ w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
+ } else {
+ // The i/o succeeded but the buffer is full.
+ // In theory we should be building up a full packet.
+ // In practice we can get away with just carrying on.
+ n = uint32(unsafe.Sizeof(watch.buf))
+ }
+ case syscall.ERROR_ACCESS_DENIED:
+ // Watched directory was probably removed
+ w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ continue
+ case syscall.ERROR_OPERATION_ABORTED:
+ // CancelIo was called on this handle
+ continue
+ default:
+ w.Errors <- os.NewSyscallError("GetQueuedCompletionStatus", e)
+ continue
+ case nil:
+ }
+
+ var offset uint32
+ for {
+ if n == 0 {
+ w.Events <- newEvent("", sysFSQOVERFLOW)
+ w.Errors <- errors.New("short read in readEvents()")
+ break
+ }
+
+ // Point "raw" to the event in the buffer
+ raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+ buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
+ name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
+ fullname := filepath.Join(watch.path, name)
+
+ var mask uint64
+ switch raw.Action {
+ case syscall.FILE_ACTION_REMOVED:
+ mask = sysFSDELETESELF
+ case syscall.FILE_ACTION_MODIFIED:
+ mask = sysFSMODIFY
+ case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+ watch.rename = name
+ case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+ if watch.names[watch.rename] != 0 {
+ watch.names[name] |= watch.names[watch.rename]
+ delete(watch.names, watch.rename)
+ mask = sysFSMOVESELF
+ }
+ }
+
+ sendNameEvent := func() {
+ if w.sendEvent(fullname, watch.names[name]&mask) {
+ if watch.names[name]&sysFSONESHOT != 0 {
+ delete(watch.names, name)
+ }
+ }
+ }
+ if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
+ sendNameEvent()
+ }
+ if raw.Action == syscall.FILE_ACTION_REMOVED {
+ w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
+ delete(watch.names, name)
+ }
+ if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
+ if watch.mask&sysFSONESHOT != 0 {
+ watch.mask = 0
+ }
+ }
+ if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
+ fullname = filepath.Join(watch.path, watch.rename)
+ sendNameEvent()
+ }
+
+ // Move to the next event in the buffer
+ if raw.NextEntryOffset == 0 {
+ break
+ }
+ offset += raw.NextEntryOffset
+
+ // Error!
+ if offset >= n {
+ w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
+ break
+ }
+ }
+
+ if err := w.startRead(watch); err != nil {
+ w.Errors <- err
+ }
+ }
+}
+
+func (w *Watcher) sendEvent(name string, mask uint64) bool {
+ if mask == 0 {
+ return false
+ }
+ event := newEvent(name, uint32(mask))
+ select {
+ case ch := <-w.quit:
+ w.quit <- ch
+ case w.Events <- event:
+ }
+ return true
+}
+
+func toWindowsFlags(mask uint64) uint32 {
+ var m uint32
+ if mask&sysFSACCESS != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
+ }
+ if mask&sysFSMODIFY != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
+ }
+ if mask&sysFSATTRIB != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
+ }
+ if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
+ }
+ return m
+}
+
+func toFSnotifyFlags(action uint32) uint64 {
+ switch action {
+ case syscall.FILE_ACTION_ADDED:
+ return sysFSCREATE
+ case syscall.FILE_ACTION_REMOVED:
+ return sysFSDELETE
+ case syscall.FILE_ACTION_MODIFIED:
+ return sysFSMODIFY
+ case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+ return sysFSMOVEDFROM
+ case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+ return sysFSMOVEDTO
+ }
+ return 0
+}
diff --git a/vendor/github.com/hashicorp/hcl/.gitignore b/vendor/github.com/hashicorp/hcl/.gitignore
new file mode 100644
index 000000000..15586a2b5
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/.gitignore
@@ -0,0 +1,9 @@
+y.output
+
+# ignore intellij files
+.idea
+*.iml
+*.ipr
+*.iws
+
+*.test
diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml
new file mode 100644
index 000000000..cb63a3216
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/.travis.yml
@@ -0,0 +1,13 @@
+sudo: false
+
+language: go
+
+go:
+ - 1.x
+ - tip
+
+branches:
+ only:
+ - master
+
+script: make test
diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE
new file mode 100644
index 000000000..c33dcc7c9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile
new file mode 100644
index 000000000..84fd743f5
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/Makefile
@@ -0,0 +1,18 @@
+TEST?=./...
+
+default: test
+
+fmt: generate
+ go fmt ./...
+
+test: generate
+ go get -t ./...
+ go test $(TEST) $(TESTARGS)
+
+generate:
+ go generate ./...
+
+updatedeps:
+ go get -u golang.org/x/tools/cmd/stringer
+
+.PHONY: default generate test updatedeps
diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md
new file mode 100644
index 000000000..c8223326d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/README.md
@@ -0,0 +1,125 @@
+# HCL
+
+[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl)
+
+HCL (HashiCorp Configuration Language) is a configuration language built
+by HashiCorp. The goal of HCL is to build a structured configuration language
+that is both human and machine friendly for use with command-line tools, but
+specifically targeted towards DevOps tools, servers, etc.
+
+HCL is also fully JSON compatible. That is, JSON can be used as completely
+valid input to a system expecting HCL. This helps make systems
+interoperable with other systems.
+
+HCL is heavily inspired by
+[libucl](https://github.com/vstakhov/libucl),
+nginx configuration, and other similar languages.
+
+## Why?
+
+A common question when first seeing HCL is: why not
+JSON, YAML, etc.?
+
+Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
+used a variety of configuration languages from full programming languages
+such as Ruby to complete data structure languages such as JSON. What we
+learned is that some people wanted human-friendly configuration languages
+and some people wanted machine-friendly languages.
+
+JSON fits a nice balance in this, but is fairly verbose and most
+importantly doesn't support comments. With YAML, we found that beginners
+had a really hard time determining what the actual structure was, and
+ended up guessing more often than not whether to use a hyphen, colon, etc.
+in order to represent some configuration key.
+
+Full programming languages such as Ruby enable complex behavior
+a configuration language shouldn't usually allow, and also force
+people to learn at least some Ruby.
+
+Because of this, we decided to create our own configuration language
+that is JSON-compatible. Our configuration language (HCL) is designed
+to be written and modified by humans. The API for HCL allows JSON
+as an input so that it is also machine-friendly (machines can generate
+JSON instead of trying to generate HCL).
+
+Our goal with HCL is not to alienate other configuration languages.
+It is instead to provide HCL as a specialized language for our tools,
+and JSON as the interoperability layer.
+
+## Syntax
+
+For a complete grammar, please see the parser itself. A high-level overview
+of the syntax and grammar is listed here.
+
+ * Single line comments start with `#` or `//`
+
+ * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
+ are not allowed. A multi-line comment (also known as a block comment)
+ terminates at the first `*/` found.
+
+ * Values are assigned with the syntax `key = value` (whitespace doesn't
+ matter). The value can be any primitive: a string, number, boolean,
+ object, or list.
+
+ * Strings are double-quoted and can contain any UTF-8 characters.
+ Example: `"Hello, World"`
+
+ * Multi-line strings start with `<-
+ echo %Path%
+
+ go version
+
+ go env
+
+ go get -t ./...
+
+build_script:
+- cmd: go test -v ./...
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
new file mode 100644
index 000000000..bed9ebbe1
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/decoder.go
@@ -0,0 +1,729 @@
+package hcl
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/parser"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// tagName is the struct field tag used to configure HCL decoding.
+const tagName = "hcl"
+
+var (
+ // nodeType holds a reference to the type of ast.Node
+ nodeType reflect.Type = findNodeType()
+)
+
+// Unmarshal accepts a byte slice as input and writes the
+// data to the value pointed to by v.
+func Unmarshal(bs []byte, v interface{}) error {
+ root, err := parse(bs)
+ if err != nil {
+ return err
+ }
+
+ return DecodeObject(v, root)
+}
+
+// Decode reads the given input and decodes it into the structure
+// given by `out`.
+func Decode(out interface{}, in string) error {
+ obj, err := Parse(in)
+ if err != nil {
+ return err
+ }
+
+ return DecodeObject(out, obj)
+}
+
+// DecodeObject is a lower-level version of Decode. It decodes a
+// raw Object into the given output.
+func DecodeObject(out interface{}, n ast.Node) error {
+ val := reflect.ValueOf(out)
+ if val.Kind() != reflect.Ptr {
+ return errors.New("result must be a pointer")
+ }
+
+ // If we were given a whole file, decode its root node
+ if f, ok := n.(*ast.File); ok {
+ n = f.Node
+ }
+
+ var d decoder
+ return d.decode("root", n, val.Elem())
+}
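+
+// Illustrative sketch (the struct, its tags, and the input string below are
+// assumptions chosen only for demonstration): decoding an HCL document into a
+// Go struct via Decode, using the "hcl" field tag handled by this decoder.
+//
+//     type ServerConfig struct {
+//         Address string `hcl:"address"`
+//         Port    int    `hcl:"port"`
+//     }
+//
+//     var cfg ServerConfig
+//     input := "address = \"127.0.0.1\"\nport = 8080"
+//     if err := Decode(&cfg, input); err != nil {
+//         // handle parse/decode errors here
+//     }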
+
+type decoder struct {
+ stack []reflect.Kind
+}
+
+func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
+ k := result
+
+ // If we have an interface with a valid value, we use that
+ // for the check.
+ if result.Kind() == reflect.Interface {
+ elem := result.Elem()
+ if elem.IsValid() {
+ k = elem
+ }
+ }
+
+ // Push current onto stack unless it is an interface.
+ if k.Kind() != reflect.Interface {
+ d.stack = append(d.stack, k.Kind())
+
+ // Schedule a pop
+ defer func() {
+ d.stack = d.stack[:len(d.stack)-1]
+ }()
+ }
+
+ switch k.Kind() {
+ case reflect.Bool:
+ return d.decodeBool(name, node, result)
+ case reflect.Float32, reflect.Float64:
+ return d.decodeFloat(name, node, result)
+ case reflect.Int, reflect.Int32, reflect.Int64:
+ return d.decodeInt(name, node, result)
+ case reflect.Interface:
+ // When we see an interface, we construct the concrete value ourselves
+ return d.decodeInterface(name, node, result)
+ case reflect.Map:
+ return d.decodeMap(name, node, result)
+ case reflect.Ptr:
+ return d.decodePtr(name, node, result)
+ case reflect.Slice:
+ return d.decodeSlice(name, node, result)
+ case reflect.String:
+ return d.decodeString(name, node, result)
+ case reflect.Struct:
+ return d.decodeStruct(name, node, result)
+ default:
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()),
+ }
+ }
+}
+
+func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ if n.Token.Type == token.BOOL {
+ v, err := strconv.ParseBool(n.Token.Text)
+ if err != nil {
+ return err
+ }
+
+ result.Set(reflect.ValueOf(v))
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type %T", name, node),
+ }
+}
+
+func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER {
+ v, err := strconv.ParseFloat(n.Token.Text, 64)
+ if err != nil {
+ return err
+ }
+
+ result.Set(reflect.ValueOf(v).Convert(result.Type()))
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type %T", name, node),
+ }
+}
+
+func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ switch n.Token.Type {
+ case token.NUMBER:
+ v, err := strconv.ParseInt(n.Token.Text, 0, 0)
+ if err != nil {
+ return err
+ }
+
+ if result.Kind() == reflect.Interface {
+ result.Set(reflect.ValueOf(int(v)))
+ } else {
+ result.SetInt(v)
+ }
+ return nil
+ case token.STRING:
+ v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
+ if err != nil {
+ return err
+ }
+
+ if result.Kind() == reflect.Interface {
+ result.Set(reflect.ValueOf(int(v)))
+ } else {
+ result.SetInt(v)
+ }
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type %T", name, node),
+ }
+}
+
+func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error {
+ // When we see an ast.Node, we retain the value to enable deferred decoding.
+ // This is very useful in situations where we want to preserve ast.Node
+ // information, such as Pos.
+ if result.Type() == nodeType && result.CanSet() {
+ result.Set(reflect.ValueOf(node))
+ return nil
+ }
+
+ var set reflect.Value
+ redecode := true
+
+ // For testing types, ObjectType should just be treated as a list. We
+ // set this to a temporary var because we want to pass in the real node.
+ testNode := node
+ if ot, ok := node.(*ast.ObjectType); ok {
+ testNode = ot.List
+ }
+
+ switch n := testNode.(type) {
+ case *ast.ObjectList:
+ // If we're at the root or we're directly within a slice, then we
+ // decode objects into map[string]interface{}, otherwise we decode
+ // them into lists.
+ if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+ var temp map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeMap(
+ reflect.MapOf(
+ reflect.TypeOf(""),
+ tempVal.Type().Elem()))
+
+ set = result
+ } else {
+ var temp []map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeSlice(
+ reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
+ set = result
+ }
+ case *ast.ObjectType:
+ // If we're at the root or we're directly within a slice, then we
+ // decode objects into map[string]interface{}, otherwise we decode
+ // them into lists.
+ if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+ var temp map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeMap(
+ reflect.MapOf(
+ reflect.TypeOf(""),
+ tempVal.Type().Elem()))
+
+ set = result
+ } else {
+ var temp []map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeSlice(
+ reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
+ set = result
+ }
+ case *ast.ListType:
+ var temp []interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeSlice(
+ reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
+ set = result
+ case *ast.LiteralType:
+ switch n.Token.Type {
+ case token.BOOL:
+ var result bool
+ set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+ case token.FLOAT:
+ var result float64
+ set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+ case token.NUMBER:
+ var result int
+ set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+ case token.STRING, token.HEREDOC:
+ set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
+ default:
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
+ }
+ }
+ default:
+ return fmt.Errorf(
+ "%s: cannot decode into interface: %T",
+ name, node)
+ }
+
+ // Set the result to what it's supposed to be, then reset
+ // result so we don't reflect into this method anymore.
+ result.Set(set)
+
+ if redecode {
+ // Revisit the node so that we can use the newly instantiated
+ // thing and populate it.
+ if err := d.decode(name, node, result); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
+ if item, ok := node.(*ast.ObjectItem); ok {
+ node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+ }
+
+ if ot, ok := node.(*ast.ObjectType); ok {
+ node = ot.List
+ }
+
+ n, ok := node.(*ast.ObjectList)
+ if !ok {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
+ }
+ }
+
+ // If we have an interface, then we can address the interface,
+ // but not the map itself, so get the element but set the interface
+ set := result
+ if result.Kind() == reflect.Interface {
+ result = result.Elem()
+ }
+
+ resultType := result.Type()
+ resultElemType := resultType.Elem()
+ resultKeyType := resultType.Key()
+ if resultKeyType.Kind() != reflect.String {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: map must have string keys", name),
+ }
+ }
+
+ // Make a map if it is nil
+ resultMap := result
+ if result.IsNil() {
+ resultMap = reflect.MakeMap(
+ reflect.MapOf(resultKeyType, resultElemType))
+ }
+
+ // Go through each element and decode it.
+ done := make(map[string]struct{})
+ for _, item := range n.Items {
+ if item.Val == nil {
+ continue
+ }
+
+ // github.com/hashicorp/terraform/issue/5740
+ if len(item.Keys) == 0 {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: map must have string keys", name),
+ }
+ }
+
+ // Get the key we're dealing with, which is the first item
+ keyStr := item.Keys[0].Token.Value().(string)
+
+ // If we've already processed this key, then ignore it
+ if _, ok := done[keyStr]; ok {
+ continue
+ }
+
+ // Determine the value. If we have more than one key, then we
+ // get the objectlist of only these keys.
+ itemVal := item.Val
+ if len(item.Keys) > 1 {
+ itemVal = n.Filter(keyStr)
+ done[keyStr] = struct{}{}
+ }
+
+ // Make the field name
+ fieldName := fmt.Sprintf("%s.%s", name, keyStr)
+
+ // Get the key/value as reflection values
+ key := reflect.ValueOf(keyStr)
+ val := reflect.Indirect(reflect.New(resultElemType))
+
+ // If we have a pre-existing value in the map, use that
+ oldVal := resultMap.MapIndex(key)
+ if oldVal.IsValid() {
+ val.Set(oldVal)
+ }
+
+ // Decode!
+ if err := d.decode(fieldName, itemVal, val); err != nil {
+ return err
+ }
+
+ // Set the value on the map
+ resultMap.SetMapIndex(key, val)
+ }
+
+ // Set the final map if we can
+ set.Set(resultMap)
+ return nil
+}
+
+func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error {
+ // Create an element of the concrete (non pointer) type and decode
+ // into that. Then set the value of the pointer to this type.
+ resultType := result.Type()
+ resultElemType := resultType.Elem()
+ val := reflect.New(resultElemType)
+ if err := d.decode(name, node, reflect.Indirect(val)); err != nil {
+ return err
+ }
+
+ result.Set(val)
+ return nil
+}
+
+func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
+ // If we have an interface, then we can address the interface,
+ // but not the slice itself, so get the element but set the interface
+ set := result
+ if result.Kind() == reflect.Interface {
+ result = result.Elem()
+ }
+ // Create the slice if it is nil
+ resultType := result.Type()
+ resultElemType := resultType.Elem()
+ if result.IsNil() {
+ resultSliceType := reflect.SliceOf(resultElemType)
+ result = reflect.MakeSlice(
+ resultSliceType, 0, 0)
+ }
+
+ // Figure out the items we'll be copying into the slice
+ var items []ast.Node
+ switch n := node.(type) {
+ case *ast.ObjectList:
+ items = make([]ast.Node, len(n.Items))
+ for i, item := range n.Items {
+ items[i] = item
+ }
+ case *ast.ObjectType:
+ items = []ast.Node{n}
+ case *ast.ListType:
+ items = n.List
+ default:
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("unknown slice type: %T", node),
+ }
+ }
+
+ for i, item := range items {
+ fieldName := fmt.Sprintf("%s[%d]", name, i)
+
+ // Decode
+ val := reflect.Indirect(reflect.New(resultElemType))
+
+ // if item is an object that was decoded from ambiguous JSON and
+ // flattened, make sure it's expanded if it needs to decode into a
+ // defined structure.
+ item := expandObject(item, val)
+
+ if err := d.decode(fieldName, item, val); err != nil {
+ return err
+ }
+
+ // Append it onto the slice
+ result = reflect.Append(result, val)
+ }
+
+ set.Set(result)
+ return nil
+}
+
+// expandObject detects if an ambiguous JSON object was flattened to a List which
+// should be decoded into a struct, and expands the ast to properly decode.
+func expandObject(node ast.Node, result reflect.Value) ast.Node {
+ item, ok := node.(*ast.ObjectItem)
+ if !ok {
+ return node
+ }
+
+ elemType := result.Type()
+
+ // our target type must be a struct
+ switch elemType.Kind() {
+ case reflect.Ptr:
+ switch elemType.Elem().Kind() {
+ case reflect.Struct:
+ //OK
+ default:
+ return node
+ }
+ case reflect.Struct:
+ //OK
+ default:
+ return node
+ }
+
+ // A list value will have a key and field name. If it had more fields,
+ // it wouldn't have been flattened.
+ if len(item.Keys) != 2 {
+ return node
+ }
+
+ keyToken := item.Keys[0].Token
+ item.Keys = item.Keys[1:]
+
+ // we need to un-flatten the ast enough to decode
+ newNode := &ast.ObjectItem{
+ Keys: []*ast.ObjectKey{
+ &ast.ObjectKey{
+ Token: keyToken,
+ },
+ },
+ Val: &ast.ObjectType{
+ List: &ast.ObjectList{
+ Items: []*ast.ObjectItem{item},
+ },
+ },
+ }
+
+ return newNode
+}
+
+func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ switch n.Token.Type {
+ case token.NUMBER:
+ result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type()))
+ return nil
+ case token.STRING, token.HEREDOC:
+ result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type()))
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type for string %T", name, node),
+ }
+}
+
+func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error {
+ var item *ast.ObjectItem
+ if it, ok := node.(*ast.ObjectItem); ok {
+ item = it
+ node = it.Val
+ }
+
+ if ot, ok := node.(*ast.ObjectType); ok {
+ node = ot.List
+ }
+
+ // Handle the special case where the object itself is a literal. Previously
+ // the yacc parser would always ensure top-level elements were arrays. The new
+ // parser does not make the same guarantees, thus we need to convert any
+ // top-level literal elements into a list.
+ if _, ok := node.(*ast.LiteralType); ok && item != nil {
+ node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+ }
+
+ list, ok := node.(*ast.ObjectList)
+ if !ok {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node),
+ }
+ }
+
+ // This slice will keep track of all the structs we'll be decoding.
+ // There can be more than one struct if there are embedded structs
+ // that are squashed.
+ structs := make([]reflect.Value, 1, 5)
+ structs[0] = result
+
+ // Compile the list of all the fields that we're going to be decoding
+ // from all the structs.
+ type field struct {
+ field reflect.StructField
+ val reflect.Value
+ }
+ fields := []field{}
+ for len(structs) > 0 {
+ structVal := structs[0]
+ structs = structs[1:]
+
+ structType := structVal.Type()
+ for i := 0; i < structType.NumField(); i++ {
+ fieldType := structType.Field(i)
+ tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
+
+ // Ignore fields with tag name "-"
+ if tagParts[0] == "-" {
+ continue
+ }
+
+ if fieldType.Anonymous {
+ fieldKind := fieldType.Type.Kind()
+ if fieldKind != reflect.Struct {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unsupported type to struct: %s",
+ fieldType.Name, fieldKind),
+ }
+ }
+
+ // We have an embedded field. We "squash" the fields down
+ // if specified in the tag.
+ squash := false
+ for _, tag := range tagParts[1:] {
+ if tag == "squash" {
+ squash = true
+ break
+ }
+ }
+
+ if squash {
+ structs = append(
+ structs, result.FieldByName(fieldType.Name))
+ continue
+ }
+ }
+
+ // Normal struct field, store it away
+ fields = append(fields, field{fieldType, structVal.Field(i)})
+ }
+ }
+
+ usedKeys := make(map[string]struct{})
+ decodedFields := make([]string, 0, len(fields))
+ decodedFieldsVal := make([]reflect.Value, 0)
+ unusedKeysVal := make([]reflect.Value, 0)
+ for _, f := range fields {
+ field, fieldValue := f.field, f.val
+ if !fieldValue.IsValid() {
+ // This should never happen
+ panic("field is not valid")
+ }
+
+ // If we can't set the field, then it is unexported or something,
+ // and we just continue onwards.
+ if !fieldValue.CanSet() {
+ continue
+ }
+
+ fieldName := field.Name
+
+ tagValue := field.Tag.Get(tagName)
+ tagParts := strings.SplitN(tagValue, ",", 2)
+ if len(tagParts) >= 2 {
+ switch tagParts[1] {
+ case "decodedFields":
+ decodedFieldsVal = append(decodedFieldsVal, fieldValue)
+ continue
+ case "key":
+ if item == nil {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: %s asked for 'key', impossible",
+ name, fieldName),
+ }
+ }
+
+ fieldValue.SetString(item.Keys[0].Token.Value().(string))
+ continue
+ case "unusedKeys":
+ unusedKeysVal = append(unusedKeysVal, fieldValue)
+ continue
+ }
+ }
+
+ if tagParts[0] != "" {
+ fieldName = tagParts[0]
+ }
+
+ // Determine the element we'll use to decode. If it is a single
+ // match (only object with the field), then we decode it exactly.
+ // If it is a prefix match, then we decode the matches.
+ filter := list.Filter(fieldName)
+
+ prefixMatches := filter.Children()
+ matches := filter.Elem()
+ if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
+ continue
+ }
+
+ // Track the used key
+ usedKeys[fieldName] = struct{}{}
+
+ // Create the field name and decode. We range over the elements
+ // because we actually want the value.
+ fieldName = fmt.Sprintf("%s.%s", name, fieldName)
+ if len(prefixMatches.Items) > 0 {
+ if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil {
+ return err
+ }
+ }
+ for _, match := range matches.Items {
+ var decodeNode ast.Node = match.Val
+ if ot, ok := decodeNode.(*ast.ObjectType); ok {
+ decodeNode = &ast.ObjectList{Items: ot.List.Items}
+ }
+
+ if err := d.decode(fieldName, decodeNode, fieldValue); err != nil {
+ return err
+ }
+ }
+
+ decodedFields = append(decodedFields, field.Name)
+ }
+
+ if len(decodedFieldsVal) > 0 {
+ // Sort it so that it is deterministic
+ sort.Strings(decodedFields)
+
+ for _, v := range decodedFieldsVal {
+ v.Set(reflect.ValueOf(decodedFields))
+ }
+ }
+
+ return nil
+}
+
+// findNodeType returns the type of ast.Node
+func findNodeType() reflect.Type {
+ var nodeContainer struct {
+ Node ast.Node
+ }
+ value := reflect.ValueOf(nodeContainer).FieldByName("Node")
+ return value.Type()
+}
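The struct-tag conventions handled by decodeStruct above ("-" to skip a field, ",key" to capture a block label, ",squash" to flatten an embedded struct) are easiest to see end to end. A minimal sketch, assuming the decoder is exposed through the package-level hcl.Decode helper and that the struct tag name is "hcl"; the types and input below are illustrative, not part of this patch:

    package main

    import (
        "fmt"

        "github.com/hashicorp/hcl"
    )

    // Service takes its Name from the block label, via the ",key" tag handling above.
    type Service struct {
        Name string `hcl:",key"`
        Addr string `hcl:"addr"`
    }

    type Config struct {
        LogLevel string    `hcl:"log_level"`
        Services []Service `hcl:"service"`
        Scratch  string    `hcl:"-"` // skipped by the decoder, per the "-" tag check above
    }

    func main() {
        src := `
    log_level = "info"

    service "api" {
      addr = "127.0.0.1:8080"
    }
    `
        var cfg Config
        if err := hcl.Decode(&cfg, src); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", cfg) // e.g. {LogLevel:info Services:[{Name:api Addr:127.0.0.1:8080}] Scratch:}
    }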
diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go
new file mode 100644
index 000000000..575a20b50
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl.go
@@ -0,0 +1,11 @@
+// Package hcl decodes HCL into usable Go structures.
+//
+// hcl input can come in either pure HCL format or JSON format.
+// It can be parsed into an AST, and then decoded into a structure,
+// or it can be decoded directly from a string into a structure.
+//
+// If you choose to parse HCL into a raw AST, the benefit is that you
+// can write custom visitor implementations to implement custom
+// semantic checks. By default, HCL does not perform any semantic
+// checks.
+package hcl
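As a rough sketch of the two-step path the package comment describes (parse to an AST first, then decode), assuming the package-level Parse and DecodeObject helpers; the input and the anonymous struct are illustrative:

    package main

    import (
        "fmt"

        "github.com/hashicorp/hcl"
    )

    func main() {
        // Parse into an AST first; a custom visitor could inspect f here
        // before anything is decoded.
        f, err := hcl.Parse(`listen = "0.0.0.0:8080"`)
        if err != nil {
            panic(err)
        }

        var out struct {
            Listen string `hcl:"listen"`
        }
        if err := hcl.DecodeObject(&out, f.Node); err != nil {
            panic(err)
        }
        fmt.Println(out.Listen) // 0.0.0.0:8080
    }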
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
new file mode 100644
index 000000000..6e5ef654b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
@@ -0,0 +1,219 @@
+// Package ast declares the types used to represent syntax trees for HCL
+// (HashiCorp Configuration Language)
+package ast
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// Node is an element in the abstract syntax tree.
+type Node interface {
+ node()
+ Pos() token.Pos
+}
+
+func (File) node() {}
+func (ObjectList) node() {}
+func (ObjectKey) node() {}
+func (ObjectItem) node() {}
+func (Comment) node() {}
+func (CommentGroup) node() {}
+func (ObjectType) node() {}
+func (LiteralType) node() {}
+func (ListType) node() {}
+
+// File represents a single HCL file
+type File struct {
+ Node Node // usually a *ObjectList
+ Comments []*CommentGroup // list of all comments in the source
+}
+
+func (f *File) Pos() token.Pos {
+ return f.Node.Pos()
+}
+
+// ObjectList represents a list of ObjectItems. An HCL file itself is an
+// ObjectList.
+type ObjectList struct {
+ Items []*ObjectItem
+}
+
+func (o *ObjectList) Add(item *ObjectItem) {
+ o.Items = append(o.Items, item)
+}
+
+// Filter filters out the objects with the given key list as a prefix.
+//
+// The returned list of objects contain ObjectItems where the keys have
+// this prefix already stripped off. This might result in objects with
+// zero-length key lists if they have no children.
+//
+// If no matches are found, an empty ObjectList (non-nil) is returned.
+func (o *ObjectList) Filter(keys ...string) *ObjectList {
+ var result ObjectList
+ for _, item := range o.Items {
+ // If there aren't enough keys, then ignore this
+ if len(item.Keys) < len(keys) {
+ continue
+ }
+
+ match := true
+ for i, key := range item.Keys[:len(keys)] {
+ key := key.Token.Value().(string)
+ if key != keys[i] && !strings.EqualFold(key, keys[i]) {
+ match = false
+ break
+ }
+ }
+ if !match {
+ continue
+ }
+
+ // Strip off the prefix from the children
+ newItem := *item
+ newItem.Keys = newItem.Keys[len(keys):]
+ result.Add(&newItem)
+ }
+
+ return &result
+}
+
+// Children returns further nested objects (key length > 0) within this
+// ObjectList. This should be used with Filter to get at child items.
+func (o *ObjectList) Children() *ObjectList {
+ var result ObjectList
+ for _, item := range o.Items {
+ if len(item.Keys) > 0 {
+ result.Add(item)
+ }
+ }
+
+ return &result
+}
+
+// Elem returns items in the list that are direct element assignments
+// (key length == 0). This should be used with Filter to get at elements.
+func (o *ObjectList) Elem() *ObjectList {
+ var result ObjectList
+ for _, item := range o.Items {
+ if len(item.Keys) == 0 {
+ result.Add(item)
+ }
+ }
+
+ return &result
+}
+
+func (o *ObjectList) Pos() token.Pos {
+ // returns the position of the first item
+ return o.Items[0].Pos()
+}
+
+// ObjectItem represents an HCL Object Item. An item is represented with a key
+// (or keys). It can be an assignment or an object (both normal and nested)
+type ObjectItem struct {
+ // Keys is only one element long if the item is an assignment. If it's a
+ // nested object it can be longer than one. In that case "assign" is
+ // invalid, as there are no assignments for a nested object.
+ Keys []*ObjectKey
+
+ // assign contains the position of "=", if any
+ Assign token.Pos
+
+ // Val is the item itself. It can be an object, list, number, bool or a
+ // string. If the key length is larger than one, Val can only be of type
+ // Object.
+ Val Node
+
+ LeadComment *CommentGroup // associated lead comment
+ LineComment *CommentGroup // associated line comment
+}
+
+func (o *ObjectItem) Pos() token.Pos {
+ // I'm not entirely sure what causes this, but removing this causes
+ // a test failure. We should investigate at some point.
+ if len(o.Keys) == 0 {
+ return token.Pos{}
+ }
+
+ return o.Keys[0].Pos()
+}
+
+// ObjectKey is either an identifier or a string.
+type ObjectKey struct {
+ Token token.Token
+}
+
+func (o *ObjectKey) Pos() token.Pos {
+ return o.Token.Pos
+}
+
+// LiteralType represents a literal of basic type. Valid types are:
+// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
+type LiteralType struct {
+ Token token.Token
+
+ // comment types, only used when in a list
+ LeadComment *CommentGroup
+ LineComment *CommentGroup
+}
+
+func (l *LiteralType) Pos() token.Pos {
+ return l.Token.Pos
+}
+
+// ListType represents an HCL List type
+type ListType struct {
+ Lbrack token.Pos // position of "["
+ Rbrack token.Pos // position of "]"
+ List []Node // the elements in lexical order
+}
+
+func (l *ListType) Pos() token.Pos {
+ return l.Lbrack
+}
+
+func (l *ListType) Add(node Node) {
+ l.List = append(l.List, node)
+}
+
+// ObjectType represents an HCL Object Type
+type ObjectType struct {
+ Lbrace token.Pos // position of "{"
+ Rbrace token.Pos // position of "}"
+ List *ObjectList // the nodes in lexical order
+}
+
+func (o *ObjectType) Pos() token.Pos {
+ return o.Lbrace
+}
+
+// Comment node represents a single //, # style or /*- style comment
+type Comment struct {
+ Start token.Pos // position of / or #
+ Text string
+}
+
+func (c *Comment) Pos() token.Pos {
+ return c.Start
+}
+
+// CommentGroup node represents a sequence of comments with no other tokens and
+// no empty lines between.
+type CommentGroup struct {
+ List []*Comment // len(List) > 0
+}
+
+func (c *CommentGroup) Pos() token.Pos {
+ return c.List[0].Pos()
+}
+
+//-------------------------------------------------------------------
+// GoStringer
+//-------------------------------------------------------------------
+
+func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) }
+func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
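A small sketch of how Filter, Children and Elem compose on a parsed file; the input, the "service" key, and the block labels are illustrative assumptions:

    package main

    import (
        "fmt"

        "github.com/hashicorp/hcl/hcl/ast"
        "github.com/hashicorp/hcl/hcl/parser"
    )

    func main() {
        f, err := parser.Parse([]byte(`
    service "api" {
      addr = "127.0.0.1:8080"
    }
    service "web" {
      addr = "127.0.0.1:8081"
    }
    `))
        if err != nil {
            panic(err)
        }

        list := f.Node.(*ast.ObjectList)

        // Filter strips the "service" prefix; Children keeps the nested blocks,
        // each still carrying its remaining key (the block label).
        for _, item := range list.Filter("service").Children().Items {
            fmt.Println(item.Keys[0].Token.Value()) // api, then web
        }
    }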
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
new file mode 100644
index 000000000..ba07ad42b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
@@ -0,0 +1,52 @@
+package ast
+
+import "fmt"
+
+// WalkFunc describes a function to be called for each node during a Walk. The
+// returned node can be used to rewrite the AST. Walking stops if the returned
+// bool is false.
+type WalkFunc func(Node) (Node, bool)
+
+// Walk traverses an AST in depth-first order: It starts by calling fn(node);
+// node must not be nil. If fn returns true, Walk invokes fn recursively for
+// each of the non-nil children of node, followed by a call of fn(nil). The
+// returned node of fn can be used to rewrite the passed node to fn.
+func Walk(node Node, fn WalkFunc) Node {
+ rewritten, ok := fn(node)
+ if !ok {
+ return rewritten
+ }
+
+ switch n := node.(type) {
+ case *File:
+ n.Node = Walk(n.Node, fn)
+ case *ObjectList:
+ for i, item := range n.Items {
+ n.Items[i] = Walk(item, fn).(*ObjectItem)
+ }
+ case *ObjectKey:
+ // nothing to do
+ case *ObjectItem:
+ for i, k := range n.Keys {
+ n.Keys[i] = Walk(k, fn).(*ObjectKey)
+ }
+
+ if n.Val != nil {
+ n.Val = Walk(n.Val, fn)
+ }
+ case *LiteralType:
+ // nothing to do
+ case *ListType:
+ for i, l := range n.List {
+ n.List[i] = Walk(l, fn)
+ }
+ case *ObjectType:
+ n.List = Walk(n.List, fn).(*ObjectList)
+ default:
+ // should we panic here?
+ fmt.Printf("unknown type: %T\n", n)
+ }
+
+ fn(nil)
+ return rewritten
+}
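A minimal sketch of Walk used as a read-only visitor, returning each node unchanged and true so traversal continues; the input string is illustrative:

    package main

    import (
        "fmt"

        "github.com/hashicorp/hcl/hcl/ast"
        "github.com/hashicorp/hcl/hcl/parser"
        "github.com/hashicorp/hcl/hcl/token"
    )

    func main() {
        f, err := parser.Parse([]byte(`name = "demo"
    count = 3`))
        if err != nil {
            panic(err)
        }

        strings := 0
        ast.Walk(f.Node, func(n ast.Node) (ast.Node, bool) {
            if lit, ok := n.(*ast.LiteralType); ok && lit.Token.Type == token.STRING {
                strings++
            }
            return n, true // keep the node as-is and keep descending
        })
        fmt.Println("string literals:", strings) // 1
    }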
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
new file mode 100644
index 000000000..5c99381df
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
@@ -0,0 +1,17 @@
+package parser
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// PosError is a parse error that contains a position.
+type PosError struct {
+ Pos token.Pos
+ Err error
+}
+
+func (e *PosError) Error() string {
+ return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
new file mode 100644
index 000000000..64c83bcfb
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
@@ -0,0 +1,532 @@
+// Package parser implements a parser for HCL (HashiCorp Configuration
+// Language)
+package parser
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/scanner"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+type Parser struct {
+ sc *scanner.Scanner
+
+ // Last read token
+ tok token.Token
+ commaPrev token.Token
+
+ comments []*ast.CommentGroup
+ leadComment *ast.CommentGroup // last lead comment
+ lineComment *ast.CommentGroup // last line comment
+
+ enableTrace bool
+ indent int
+ n int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+ return &Parser{
+ sc: scanner.New(src),
+ }
+}
+
+// Parse parses the given source and returns the resulting abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+ // normalize all line endings
+ // since the scanner and output only work with "\n" line endings, we may
+ // end up with dangling "\r" characters in the parsed data.
+ src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
+
+ p := newParser(src)
+ return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse parses the given source and returns the resulting abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+ f := &ast.File{}
+ var err, scerr error
+ p.sc.Error = func(pos token.Pos, msg string) {
+ scerr = &PosError{Pos: pos, Err: errors.New(msg)}
+ }
+
+ f.Node, err = p.objectList(false)
+ if scerr != nil {
+ return nil, scerr
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ f.Comments = p.comments
+ return f, nil
+}
+
+// objectList parses a list of items within an object (generally k/v pairs).
+// The parameter "obj" tells this whether we are within an object (braces:
+// '{', '}') or just at the top level. If we're within an object, we end
+// at an RBRACE.
+func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
+ defer un(trace(p, "ParseObjectList"))
+ node := &ast.ObjectList{}
+
+ for {
+ if obj {
+ tok := p.scan()
+ p.unscan()
+ if tok.Type == token.RBRACE {
+ break
+ }
+ }
+
+ n, err := p.objectItem()
+ if err == errEofToken {
+ break // we are finished
+ }
+
+ // we don't return a nil node, because we might want to use the already
+ // collected items.
+ if err != nil {
+ return node, err
+ }
+
+ node.Add(n)
+
+ // object lists can be optionally comma-delimited e.g. when a list of maps
+ // is being expressed, so a comma is allowed here - it's simply consumed
+ tok := p.scan()
+ if tok.Type != token.COMMA {
+ p.unscan()
+ }
+ }
+ return node, nil
+}
+
+func (p *Parser) consumeComment() (comment *ast.Comment, endline int) {
+ endline = p.tok.Pos.Line
+
+ // count the endline if it's a multiline comment, i.e. starting with /*
+ if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' {
+ // don't use range here - no need to decode Unicode code points
+ for i := 0; i < len(p.tok.Text); i++ {
+ if p.tok.Text[i] == '\n' {
+ endline++
+ }
+ }
+ }
+
+ comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text}
+ p.tok = p.sc.Scan()
+ return
+}
+
+func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
+ var list []*ast.Comment
+ endline = p.tok.Pos.Line
+
+ for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n {
+ var comment *ast.Comment
+ comment, endline = p.consumeComment()
+ list = append(list, comment)
+ }
+
+ // add comment group to the comments list
+ comments = &ast.CommentGroup{List: list}
+ p.comments = append(p.comments, comments)
+
+ return
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+ defer un(trace(p, "ParseObjectItem"))
+
+ keys, err := p.objectKey()
+ if len(keys) > 0 && err == errEofToken {
+ // We ignore eof token here since it is an error if we didn't
+ // receive a value (but we did receive a key) for the item.
+ err = nil
+ }
+ if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
+ // This is a strange boolean statement, but what it means is:
+ // We have keys with no value, and we're likely in an object
+ // (since RBrace ends an object). For this, we set err to nil so
+ // we continue and get the error below of having the wrong value
+ // type.
+ err = nil
+
+ // Reset the token type so we don't think it completed fine. See
+ // objectType which uses p.tok.Type to check if we're done with
+ // the object.
+ p.tok.Type = token.EOF
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ o := &ast.ObjectItem{
+ Keys: keys,
+ }
+
+ if p.leadComment != nil {
+ o.LeadComment = p.leadComment
+ p.leadComment = nil
+ }
+
+ switch p.tok.Type {
+ case token.ASSIGN:
+ o.Assign = p.tok.Pos
+ o.Val, err = p.object()
+ if err != nil {
+ return nil, err
+ }
+ case token.LBRACE:
+ o.Val, err = p.objectType()
+ if err != nil {
+ return nil, err
+ }
+ default:
+ keyStr := make([]string, 0, len(keys))
+ for _, k := range keys {
+ keyStr = append(keyStr, k.Token.Text)
+ }
+
+ return nil, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf(
+ "key '%s' expected start of object ('{') or assignment ('=')",
+ strings.Join(keyStr, " ")),
+ }
+ }
+
+ // key=#comment
+ // val
+ if p.lineComment != nil {
+ o.LineComment, p.lineComment = p.lineComment, nil
+ }
+
+ // do a look-ahead for line comment
+ p.scan()
+ if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
+ o.LineComment = p.lineComment
+ p.lineComment = nil
+ }
+ p.unscan()
+ return o, nil
+}
+
+// objectKey parses an object key and returns a ObjectKey AST
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+ keyCount := 0
+ keys := make([]*ast.ObjectKey, 0)
+
+ for {
+ tok := p.scan()
+ switch tok.Type {
+ case token.EOF:
+ // It is very important to also return the keys here as well as
+ // the error. This is because we need to be able to tell if we
+ // did parse keys prior to finding the EOF, or if we just found
+ // a bare EOF.
+ return keys, errEofToken
+ case token.ASSIGN:
+ // assignment or object only, but not nested objects. this is not
+ // allowed: `foo bar = {}`
+ if keyCount > 1 {
+ return nil, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
+ }
+ }
+
+ if keyCount == 0 {
+ return nil, &PosError{
+ Pos: p.tok.Pos,
+ Err: errors.New("no object keys found!"),
+ }
+ }
+
+ return keys, nil
+ case token.LBRACE:
+ var err error
+
+ // If we have no keys, then it is a syntax error. i.e. {{}} is not
+ // allowed.
+ if len(keys) == 0 {
+ err = &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
+ }
+ }
+
+ // object
+ return keys, err
+ case token.IDENT, token.STRING:
+ keyCount++
+ keys = append(keys, &ast.ObjectKey{Token: p.tok})
+ case token.ILLEGAL:
+ return keys, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("illegal character"),
+ }
+ default:
+ return keys, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
+ }
+ }
+ }
+}
+
+// object parses any type of object, such as number, bool, string, object or
+// list.
+func (p *Parser) object() (ast.Node, error) {
+ defer un(trace(p, "ParseType"))
+ tok := p.scan()
+
+ switch tok.Type {
+ case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
+ return p.literalType()
+ case token.LBRACE:
+ return p.objectType()
+ case token.LBRACK:
+ return p.listType()
+ case token.COMMENT:
+ // implement comment
+ case token.EOF:
+ return nil, errEofToken
+ }
+
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf("Unknown token: %+v", tok),
+ }
+}
+
+// objectType parses an object type and returns a ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+ defer un(trace(p, "ParseObjectType"))
+
+ // we assume that the currently scanned token is a LBRACE
+ o := &ast.ObjectType{
+ Lbrace: p.tok.Pos,
+ }
+
+ l, err := p.objectList(true)
+
+ // if we hit RBRACE, we are good to go (means we parsed all Items), if it's
+ // not an RBRACE, it's a syntax error and we just return it.
+ if err != nil && p.tok.Type != token.RBRACE {
+ return nil, err
+ }
+
+ // No error, scan and expect the ending to be a brace
+ if tok := p.scan(); tok.Type != token.RBRACE {
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
+ }
+ }
+
+ o.List = l
+ o.Rbrace = p.tok.Pos // advanced via parseObjectList
+ return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+ defer un(trace(p, "ParseListType"))
+
+ // we assume that the currently scanned token is a LBRACK
+ l := &ast.ListType{
+ Lbrack: p.tok.Pos,
+ }
+
+ needComma := false
+ for {
+ tok := p.scan()
+ if needComma {
+ switch tok.Type {
+ case token.COMMA, token.RBRACK:
+ default:
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf(
+ "error parsing list, expected comma or list end, got: %s",
+ tok.Type),
+ }
+ }
+ }
+ switch tok.Type {
+ case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
+ node, err := p.literalType()
+ if err != nil {
+ return nil, err
+ }
+
+ // If there is a lead comment, apply it
+ if p.leadComment != nil {
+ node.LeadComment = p.leadComment
+ p.leadComment = nil
+ }
+
+ l.Add(node)
+ needComma = true
+ case token.COMMA:
+ // get next list item or we are at the end
+ // do a look-ahead for line comment
+ p.scan()
+ if p.lineComment != nil && len(l.List) > 0 {
+ lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
+ if ok {
+ lit.LineComment = p.lineComment
+ l.List[len(l.List)-1] = lit
+ p.lineComment = nil
+ }
+ }
+ p.unscan()
+
+ needComma = false
+ continue
+ case token.LBRACE:
+ // Looks like a nested object, so parse it out
+ node, err := p.objectType()
+ if err != nil {
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf(
+ "error while trying to parse object within list: %s", err),
+ }
+ }
+ l.Add(node)
+ needComma = true
+ case token.LBRACK:
+ node, err := p.listType()
+ if err != nil {
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf(
+ "error while trying to parse list within list: %s", err),
+ }
+ }
+ l.Add(node)
+ case token.RBRACK:
+ // finished
+ l.Rbrack = p.tok.Pos
+ return l, nil
+ default:
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type),
+ }
+ }
+ }
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+ defer un(trace(p, "ParseLiteral"))
+
+ return &ast.LiteralType{
+ Token: p.tok,
+ }, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead. In the process, it collects any
+// comment groups encountered, and remembers the last lead and line comments.
+func (p *Parser) scan() token.Token {
+ // If we have a token on the buffer, then return it.
+ if p.n != 0 {
+ p.n = 0
+ return p.tok
+ }
+
+ // Otherwise read the next token from the scanner and save it to the buffer
+ // in case we unscan later.
+ prev := p.tok
+ p.tok = p.sc.Scan()
+
+ if p.tok.Type == token.COMMENT {
+ var comment *ast.CommentGroup
+ var endline int
+
+ // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n",
+ // p.tok.Pos.Line, prev.Pos.Line, endline)
+ if p.tok.Pos.Line == prev.Pos.Line {
+ // The comment is on same line as the previous token; it
+ // cannot be a lead comment but may be a line comment.
+ comment, endline = p.consumeCommentGroup(0)
+ if p.tok.Pos.Line != endline {
+ // The next token is on a different line, thus
+ // the last comment group is a line comment.
+ p.lineComment = comment
+ }
+ }
+
+ // consume successor comments, if any
+ endline = -1
+ for p.tok.Type == token.COMMENT {
+ comment, endline = p.consumeCommentGroup(1)
+ }
+
+ if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE {
+ switch p.tok.Type {
+ case token.RBRACE, token.RBRACK:
+ // Do not count for these cases
+ default:
+ // The next token is following on the line immediately after the
+ // comment group, thus the last comment group is a lead comment.
+ p.leadComment = comment
+ }
+ }
+
+ }
+
+ return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+ p.n = 1
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+ if !p.enableTrace {
+ return
+ }
+
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+ i := 2 * p.indent
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+ p.printTrace(msg, "(")
+ p.indent++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+ p.indent--
+ p.printTrace(")")
+}
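Parse failures generally surface as *PosError values carrying the offending position, as in this hedged sketch; the malformed input is deliberate, and errors.As is just one way to unwrap it:

    package main

    import (
        "errors"
        "fmt"

        "github.com/hashicorp/hcl/hcl/parser"
    )

    func main() {
        // Missing comma between list elements, so listType reports a PosError.
        _, err := parser.Parse([]byte(`ports = [8080 8081]`))

        var perr *parser.PosError
        if errors.As(err, &perr) {
            fmt.Printf("line %d, column %d: %v\n", perr.Pos.Line, perr.Pos.Column, perr.Err)
        } else if err != nil {
            fmt.Println(err)
        }
    }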
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
new file mode 100644
index 000000000..7c038d12a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
@@ -0,0 +1,789 @@
+package printer
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+const (
+ blank = byte(' ')
+ newline = byte('\n')
+ tab = byte('\t')
+ infinity = 1 << 30 // offset or line
+)
+
+var (
+ unindent = []byte("\uE123") // in the private use space
+)
+
+type printer struct {
+ cfg Config
+ prev token.Pos
+
+ comments []*ast.CommentGroup // may be nil, contains all comments
+ standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node)
+
+ enableTrace bool
+ indentTrace int
+}
+
+type ByPosition []*ast.CommentGroup
+
+func (b ByPosition) Len() int { return len(b) }
+func (b ByPosition) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) }
+
+// collectComments collects all standalone comments which are not lead or line
+// comments.
+func (p *printer) collectComments(node ast.Node) {
+ // first collect all comments. This is already stored in
+ // ast.File.(comments)
+ ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
+ switch t := nn.(type) {
+ case *ast.File:
+ p.comments = t.Comments
+ return nn, false
+ }
+ return nn, true
+ })
+
+ standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0)
+ for _, c := range p.comments {
+ standaloneComments[c.Pos()] = c
+ }
+
+ // next remove all lead and line comments from the overall comment map.
+ // This will give us comments which are standalone, comments which are not
+ // assigned to any kind of node.
+ ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
+ switch t := nn.(type) {
+ case *ast.LiteralType:
+ if t.LeadComment != nil {
+ for _, comment := range t.LeadComment.List {
+ if _, ok := standaloneComments[comment.Pos()]; ok {
+ delete(standaloneComments, comment.Pos())
+ }
+ }
+ }
+
+ if t.LineComment != nil {
+ for _, comment := range t.LineComment.List {
+ if _, ok := standaloneComments[comment.Pos()]; ok {
+ delete(standaloneComments, comment.Pos())
+ }
+ }
+ }
+ case *ast.ObjectItem:
+ if t.LeadComment != nil {
+ for _, comment := range t.LeadComment.List {
+ if _, ok := standaloneComments[comment.Pos()]; ok {
+ delete(standaloneComments, comment.Pos())
+ }
+ }
+ }
+
+ if t.LineComment != nil {
+ for _, comment := range t.LineComment.List {
+ if _, ok := standaloneComments[comment.Pos()]; ok {
+ delete(standaloneComments, comment.Pos())
+ }
+ }
+ }
+ }
+
+ return nn, true
+ })
+
+ for _, c := range standaloneComments {
+ p.standaloneComments = append(p.standaloneComments, c)
+ }
+
+ sort.Sort(ByPosition(p.standaloneComments))
+}
+
+// output creates printable HCL output and returns it.
+func (p *printer) output(n interface{}) []byte {
+ var buf bytes.Buffer
+
+ switch t := n.(type) {
+ case *ast.File:
+ // File doesn't trace so we add the tracing here
+ defer un(trace(p, "File"))
+ return p.output(t.Node)
+ case *ast.ObjectList:
+ defer un(trace(p, "ObjectList"))
+
+ var index int
+ for {
+ // Determine the location of the next actual non-comment
+ // item. If we're at the end, the next item is at "infinity"
+ var nextItem token.Pos
+ if index != len(t.Items) {
+ nextItem = t.Items[index].Pos()
+ } else {
+ nextItem = token.Pos{Offset: infinity, Line: infinity}
+ }
+
+ // Go through the standalone comments in the file and print out
+ // the comments that we should print for this object item.
+ for _, c := range p.standaloneComments {
+ // Go through all the comments in the group. The group
+ // should be printed together, not separated by double newlines.
+ printed := false
+ newlinePrinted := false
+ for _, comment := range c.List {
+ // We only care about comments after the previous item
+ // we've printed so that comments are printed in the
+ // correct locations (between two objects for example).
+ // And before the next item.
+ if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
+ // if we hit the end add newlines so we can print the comment
+ // we don't do this if prev is invalid which means the
+ // beginning of the file since the first comment should
+ // be at the first line.
+ if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) {
+ buf.Write([]byte{newline, newline})
+ newlinePrinted = true
+ }
+
+ // Write the actual comment.
+ buf.WriteString(comment.Text)
+ buf.WriteByte(newline)
+
+ // Set printed to true to note that we printed something
+ printed = true
+ }
+ }
+
+ // If we're not at the last item, write a new line so
+ // that there is a newline separating this comment from
+ // the next object.
+ if printed && index != len(t.Items) {
+ buf.WriteByte(newline)
+ }
+ }
+
+ if index == len(t.Items) {
+ break
+ }
+
+ buf.Write(p.output(t.Items[index]))
+ if index != len(t.Items)-1 {
+ // Always write a newline to separate us from the next item
+ buf.WriteByte(newline)
+
+ // Need to determine if we're going to separate the next item
+ // with a blank line. The logic here is simple, though there
+ // are a few conditions:
+ //
+ // 1. The next object is more than one line away anyways,
+ // so we need an empty line.
+ //
+ // 2. The next object is not a "single line" object, so
+ // we need an empty line.
+ //
+ // 3. This current object is not a single line object,
+ // so we need an empty line.
+ current := t.Items[index]
+ next := t.Items[index+1]
+ if next.Pos().Line != t.Items[index].Pos().Line+1 ||
+ !p.isSingleLineObject(next) ||
+ !p.isSingleLineObject(current) {
+ buf.WriteByte(newline)
+ }
+ }
+ index++
+ }
+ case *ast.ObjectKey:
+ buf.WriteString(t.Token.Text)
+ case *ast.ObjectItem:
+ p.prev = t.Pos()
+ buf.Write(p.objectItem(t))
+ case *ast.LiteralType:
+ buf.Write(p.literalType(t))
+ case *ast.ListType:
+ buf.Write(p.list(t))
+ case *ast.ObjectType:
+ buf.Write(p.objectType(t))
+ default:
+ fmt.Printf(" unknown type: %T\n", n)
+ }
+
+ return buf.Bytes()
+}
+
+func (p *printer) literalType(lit *ast.LiteralType) []byte {
+ result := []byte(lit.Token.Text)
+ switch lit.Token.Type {
+ case token.HEREDOC:
+ // Clear the trailing newline from heredocs
+ if result[len(result)-1] == '\n' {
+ result = result[:len(result)-1]
+ }
+
+ // Poison lines 2+ so that we don't indent them
+ result = p.heredocIndent(result)
+ case token.STRING:
+ // If this is a multiline string, poison lines 2+ so we don't
+ // indent them.
+ if bytes.IndexRune(result, '\n') >= 0 {
+ result = p.heredocIndent(result)
+ }
+ }
+
+ return result
+}
+
+// objectItem returns the printable HCL form of an object item. An object type
+// starts with one/multiple keys and has a value. The value might be of any
+// type.
+func (p *printer) objectItem(o *ast.ObjectItem) []byte {
+ defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text)))
+ var buf bytes.Buffer
+
+ if o.LeadComment != nil {
+ for _, comment := range o.LeadComment.List {
+ buf.WriteString(comment.Text)
+ buf.WriteByte(newline)
+ }
+ }
+
+ // If key and val are on different lines, treat line comments like lead comments.
+ if o.LineComment != nil && o.Val.Pos().Line != o.Keys[0].Pos().Line {
+ for _, comment := range o.LineComment.List {
+ buf.WriteString(comment.Text)
+ buf.WriteByte(newline)
+ }
+ }
+
+ for i, k := range o.Keys {
+ buf.WriteString(k.Token.Text)
+ buf.WriteByte(blank)
+
+ // reach end of key
+ if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 {
+ buf.WriteString("=")
+ buf.WriteByte(blank)
+ }
+ }
+
+ buf.Write(p.output(o.Val))
+
+ if o.LineComment != nil && o.Val.Pos().Line == o.Keys[0].Pos().Line {
+ buf.WriteByte(blank)
+ for _, comment := range o.LineComment.List {
+ buf.WriteString(comment.Text)
+ }
+ }
+
+ return buf.Bytes()
+}
+
+// objectType returns the printable HCL form of an object type. An object type
+// begins with a brace and ends with a brace.
+func (p *printer) objectType(o *ast.ObjectType) []byte {
+ defer un(trace(p, "ObjectType"))
+ var buf bytes.Buffer
+ buf.WriteString("{")
+
+ var index int
+ var nextItem token.Pos
+ var commented, newlinePrinted bool
+ for {
+ // Determine the location of the next actual non-comment
+ // item. If we're at the end, the next item is the closing brace
+ if index != len(o.List.Items) {
+ nextItem = o.List.Items[index].Pos()
+ } else {
+ nextItem = o.Rbrace
+ }
+
+ // Go through the standalone comments in the file and print out
+ // the comments that we should print for this object item.
+ for _, c := range p.standaloneComments {
+ printed := false
+ var lastCommentPos token.Pos
+ for _, comment := range c.List {
+ // We only care about comments after the previous item
+ // we've printed so that comments are printed in the
+ // correct locations (between two objects for example).
+ // And before the next item.
+ if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
+ // If there are standalone comments and the initial newline has not
+ // been printed yet, do it now.
+ if !newlinePrinted {
+ newlinePrinted = true
+ buf.WriteByte(newline)
+ }
+
+ // add newline if it's between other printed nodes
+ if index > 0 {
+ commented = true
+ buf.WriteByte(newline)
+ }
+
+ // Store this position
+ lastCommentPos = comment.Pos()
+
+ // output the comment itself
+ buf.Write(p.indent(p.heredocIndent([]byte(comment.Text))))
+
+ // Set printed to true to note that we printed something
+ printed = true
+
+ /*
+ if index != len(o.List.Items) {
+ buf.WriteByte(newline) // do not print on the end
+ }
+ */
+ }
+ }
+
+ // Stuff to do if we had comments
+ if printed {
+ // Always write a newline
+ buf.WriteByte(newline)
+
+ // If there is another item in the object and our comment
+ // didn't hug it directly, then make sure there is a blank
+ // line separating them.
+ if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 {
+ buf.WriteByte(newline)
+ }
+ }
+ }
+
+ if index == len(o.List.Items) {
+ p.prev = o.Rbrace
+ break
+ }
+
+ // At this point we are sure that it's not a totally empty block: print
+ // the initial newline if it hasn't been printed yet by the previous
+ // block about standalone comments.
+ if !newlinePrinted {
+ buf.WriteByte(newline)
+ newlinePrinted = true
+ }
+
+ // check if we have adjacent one-liner items. If so, we're going to align
+ // the comments.
+ var aligned []*ast.ObjectItem
+ for _, item := range o.List.Items[index:] {
+ // we don't group one line lists
+ if len(o.List.Items) == 1 {
+ break
+ }
+
+ // one means a oneliner without any lead comment
+ // two means a oneliner with lead comment
+ // anything else might be something else
+ cur := lines(string(p.objectItem(item)))
+ if cur > 2 {
+ break
+ }
+
+ curPos := item.Pos()
+
+ nextPos := token.Pos{}
+ if index != len(o.List.Items)-1 {
+ nextPos = o.List.Items[index+1].Pos()
+ }
+
+ prevPos := token.Pos{}
+ if index != 0 {
+ prevPos = o.List.Items[index-1].Pos()
+ }
+
+ // fmt.Println("DEBUG ----------------")
+ // fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos)
+ // fmt.Printf("cur = %+v curPos: %s\n", cur, curPos)
+ // fmt.Printf("next = %+v nextPos: %s\n", next, nextPos)
+
+ if curPos.Line+1 == nextPos.Line {
+ aligned = append(aligned, item)
+ index++
+ continue
+ }
+
+ if curPos.Line-1 == prevPos.Line {
+ aligned = append(aligned, item)
+ index++
+
+ // finish if we have a new line or comment next. This happens
+ // if the next item is not adjacent
+ if curPos.Line+1 != nextPos.Line {
+ break
+ }
+ continue
+ }
+
+ break
+ }
+
+ // put newlines if the items are between other non aligned items.
+ // newlines are also added if there is a standalone comment already, so
+ // check it too
+ if !commented && index != len(aligned) {
+ buf.WriteByte(newline)
+ }
+
+ if len(aligned) >= 1 {
+ p.prev = aligned[len(aligned)-1].Pos()
+
+ items := p.alignedItems(aligned)
+ buf.Write(p.indent(items))
+ } else {
+ p.prev = o.List.Items[index].Pos()
+
+ buf.Write(p.indent(p.objectItem(o.List.Items[index])))
+ index++
+ }
+
+ buf.WriteByte(newline)
+ }
+
+ buf.WriteString("}")
+ return buf.Bytes()
+}
+
+func (p *printer) alignedItems(items []*ast.ObjectItem) []byte {
+ var buf bytes.Buffer
+
+ // find the longest key and value length, needed for alignment
+ var longestKeyLen int // longest key length
+ var longestValLen int // longest value length
+ for _, item := range items {
+ key := len(item.Keys[0].Token.Text)
+ val := len(p.output(item.Val))
+
+ if key > longestKeyLen {
+ longestKeyLen = key
+ }
+
+ if val > longestValLen {
+ longestValLen = val
+ }
+ }
+
+ for i, item := range items {
+ if item.LeadComment != nil {
+ for _, comment := range item.LeadComment.List {
+ buf.WriteString(comment.Text)
+ buf.WriteByte(newline)
+ }
+ }
+
+ for i, k := range item.Keys {
+ keyLen := len(k.Token.Text)
+ buf.WriteString(k.Token.Text)
+ for i := 0; i < longestKeyLen-keyLen+1; i++ {
+ buf.WriteByte(blank)
+ }
+
+ // reach end of key
+ if i == len(item.Keys)-1 && len(item.Keys) == 1 {
+ buf.WriteString("=")
+ buf.WriteByte(blank)
+ }
+ }
+
+ val := p.output(item.Val)
+ valLen := len(val)
+ buf.Write(val)
+
+ if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil {
+ for i := 0; i < longestValLen-valLen+1; i++ {
+ buf.WriteByte(blank)
+ }
+
+ for _, comment := range item.LineComment.List {
+ buf.WriteString(comment.Text)
+ }
+ }
+
+ // do not print for the last item
+ if i != len(items)-1 {
+ buf.WriteByte(newline)
+ }
+ }
+
+ return buf.Bytes()
+}
+
+// list returns the printable HCL form of a list type.
+func (p *printer) list(l *ast.ListType) []byte {
+ if p.isSingleLineList(l) {
+ return p.singleLineList(l)
+ }
+
+ var buf bytes.Buffer
+ buf.WriteString("[")
+ buf.WriteByte(newline)
+
+ var longestLine int
+ for _, item := range l.List {
+ // for now we assume that the list only contains literal types
+ if lit, ok := item.(*ast.LiteralType); ok {
+ lineLen := len(lit.Token.Text)
+ if lineLen > longestLine {
+ longestLine = lineLen
+ }
+ }
+ }
+
+ haveEmptyLine := false
+ for i, item := range l.List {
+ // If we have a lead comment, then we want to write that first
+ leadComment := false
+ if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil {
+ leadComment = true
+
+ // Ensure an empty line before every element with a
+ // lead comment (except the first item in a list).
+ if !haveEmptyLine && i != 0 {
+ buf.WriteByte(newline)
+ }
+
+ for _, comment := range lit.LeadComment.List {
+ buf.Write(p.indent([]byte(comment.Text)))
+ buf.WriteByte(newline)
+ }
+ }
+
+ // also indent each line
+ val := p.output(item)
+ curLen := len(val)
+ buf.Write(p.indent(val))
+
+ // if this item is a heredoc, then we output the comma on
+ // the next line. This is the only case this happens.
+ comma := []byte{','}
+ if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
+ buf.WriteByte(newline)
+ comma = p.indent(comma)
+ }
+
+ buf.Write(comma)
+
+ if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
+ // if the next item doesn't have any comments, do not align
+ buf.WriteByte(blank) // align one space
+ for i := 0; i < longestLine-curLen; i++ {
+ buf.WriteByte(blank)
+ }
+
+ for _, comment := range lit.LineComment.List {
+ buf.WriteString(comment.Text)
+ }
+ }
+
+ buf.WriteByte(newline)
+
+ // Ensure an empty line after every element with a
+ // lead comment (except the first item in a list).
+ haveEmptyLine = leadComment && i != len(l.List)-1
+ if haveEmptyLine {
+ buf.WriteByte(newline)
+ }
+ }
+
+ buf.WriteString("]")
+ return buf.Bytes()
+}
+
+// isSingleLineList returns true if:
+// * they were previously formatted entirely on one line
+// * they consist entirely of literals
+// * there are either no heredoc strings or the list has exactly one element
+// * there are no line comments
+func (printer) isSingleLineList(l *ast.ListType) bool {
+ for _, item := range l.List {
+ if item.Pos().Line != l.Lbrack.Line {
+ return false
+ }
+
+ lit, ok := item.(*ast.LiteralType)
+ if !ok {
+ return false
+ }
+
+ if lit.Token.Type == token.HEREDOC && len(l.List) != 1 {
+ return false
+ }
+
+ if lit.LineComment != nil {
+ return false
+ }
+ }
+
+ return true
+}
+
+// singleLineList prints a simple single line list.
+// For a definition of "simple", see isSingleLineList above.
+func (p *printer) singleLineList(l *ast.ListType) []byte {
+ buf := &bytes.Buffer{}
+
+ buf.WriteString("[")
+ for i, item := range l.List {
+ if i != 0 {
+ buf.WriteString(", ")
+ }
+
+ // Output the item itself
+ buf.Write(p.output(item))
+
+ // The heredoc marker needs to be at the end of line.
+ if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
+ buf.WriteByte(newline)
+ }
+ }
+
+ buf.WriteString("]")
+ return buf.Bytes()
+}
+
+// indent indents the lines of the given buffer for each non-empty line
+func (p *printer) indent(buf []byte) []byte {
+ var prefix []byte
+ if p.cfg.SpacesWidth != 0 {
+ for i := 0; i < p.cfg.SpacesWidth; i++ {
+ prefix = append(prefix, blank)
+ }
+ } else {
+ prefix = []byte{tab}
+ }
+
+ var res []byte
+ bol := true
+ for _, c := range buf {
+ if bol && c != '\n' {
+ res = append(res, prefix...)
+ }
+
+ res = append(res, c)
+ bol = c == '\n'
+ }
+ return res
+}
+
+// unindent removes all the indentation from the tombstoned lines
+func (p *printer) unindent(buf []byte) []byte {
+ var res []byte
+ for i := 0; i < len(buf); i++ {
+ skip := len(buf)-i <= len(unindent)
+ if !skip {
+ skip = !bytes.Equal(unindent, buf[i:i+len(unindent)])
+ }
+ if skip {
+ res = append(res, buf[i])
+ continue
+ }
+
+ // We have a marker. we have to backtrace here and clean out
+ // any whitespace ahead of our tombstone up to a \n
+ for j := len(res) - 1; j >= 0; j-- {
+ if res[j] == '\n' {
+ break
+ }
+
+ res = res[:j]
+ }
+
+ // Skip the entire unindent marker
+ i += len(unindent) - 1
+ }
+
+ return res
+}
+
+// heredocIndent marks all the 2nd and further lines as unindentable
+func (p *printer) heredocIndent(buf []byte) []byte {
+ var res []byte
+ bol := false
+ for _, c := range buf {
+ if bol && c != '\n' {
+ res = append(res, unindent...)
+ }
+ res = append(res, c)
+ bol = c == '\n'
+ }
+ return res
+}
+
+// isSingleLineObject tells whether the given object item is a single
+// line object such as "obj {}".
+//
+// A single line object:
+//
+// * has no lead comments (hence multi-line)
+// * has no assignment
+// * has no values in the stanza (within {})
+//
+func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool {
+ // If there is a lead comment, can't be one line
+ if val.LeadComment != nil {
+ return false
+ }
+
+ // If there is assignment, we always break by line
+ if val.Assign.IsValid() {
+ return false
+ }
+
+ // If it isn't an object type, then it's not a single line object
+ ot, ok := val.Val.(*ast.ObjectType)
+ if !ok {
+ return false
+ }
+
+ // If the object has no items, it is single line!
+ return len(ot.List.Items) == 0
+}
+
+func lines(txt string) int {
+ endline := 1
+ for i := 0; i < len(txt); i++ {
+ if txt[i] == '\n' {
+ endline++
+ }
+ }
+ return endline
+}
+
+// ----------------------------------------------------------------------------
+// Tracing support
+
+func (p *printer) printTrace(a ...interface{}) {
+ if !p.enableTrace {
+ return
+ }
+
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ i := 2 * p.indentTrace
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *printer, msg string) *printer {
+ p.printTrace(msg, "(")
+ p.indentTrace++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *printer) {
+ p.indentTrace--
+ p.printTrace(")")
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
new file mode 100644
index 000000000..6617ab8e7
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
@@ -0,0 +1,66 @@
+// Package printer implements printing of AST nodes to HCL format.
+package printer
+
+import (
+ "bytes"
+ "io"
+ "text/tabwriter"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/parser"
+)
+
+var DefaultConfig = Config{
+ SpacesWidth: 2,
+}
+
+// A Config node controls the output of Fprint.
+type Config struct {
+ SpacesWidth int // if set, it will use spaces instead of tabs for alignment
+}
+
+func (c *Config) Fprint(output io.Writer, node ast.Node) error {
+ p := &printer{
+ cfg: *c,
+ comments: make([]*ast.CommentGroup, 0),
+ standaloneComments: make([]*ast.CommentGroup, 0),
+ // enableTrace: true,
+ }
+
+ p.collectComments(node)
+
+ if _, err := output.Write(p.unindent(p.output(node))); err != nil {
+ return err
+ }
+
+ // flush tabwriter, if any
+ var err error
+ if tw, _ := output.(*tabwriter.Writer); tw != nil {
+ err = tw.Flush()
+ }
+
+ return err
+}
+
+// Fprint "pretty-prints" an HCL node to output
+// It calls Config.Fprint with default settings.
+func Fprint(output io.Writer, node ast.Node) error {
+ return DefaultConfig.Fprint(output, node)
+}
+
+// Format formats src HCL and returns the result.
+func Format(src []byte) ([]byte, error) {
+ node, err := parser.Parse(src)
+ if err != nil {
+ return nil, err
+ }
+
+ var buf bytes.Buffer
+ if err := DefaultConfig.Fprint(&buf, node); err != nil {
+ return nil, err
+ }
+
+ // Add trailing newline to result
+ buf.WriteString("\n")
+ return buf.Bytes(), nil
+}
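A minimal sketch of Format normalizing an assignment; the output shown in the comment is what the printer above typically produces for this input, not a guarantee:

    package main

    import (
        "fmt"

        "github.com/hashicorp/hcl/hcl/printer"
    )

    func main() {
        out, err := printer.Format([]byte(`log_level="debug"`))
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out)) // log_level = "debug"
    }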
diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
new file mode 100644
index 000000000..624a18fe3
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
@@ -0,0 +1,652 @@
+// Package scanner implements a scanner for HCL (HashiCorp Configuration
+// Language) source text.
+package scanner
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "regexp"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+ buf *bytes.Buffer // Source buffer for advancing and scanning
+ src []byte // Source buffer for immutable access
+
+ // Source Position
+ srcPos token.Pos // current position
+ prevPos token.Pos // previous position, used for peek() method
+
+ lastCharLen int // length of last character in bytes
+ lastLineLen int // length of last line in characters (for correct column reporting)
+
+ tokStart int // token text start position
+ tokEnd int // token text end position
+
+ // Error is called for each error encountered. If no Error
+ // function is set, the error is reported to os.Stderr.
+ Error func(pos token.Pos, msg string)
+
+ // ErrorCount is incremented by one for each error encountered.
+ ErrorCount int
+
+ // tokPos is the start position of most recently scanned token; set by
+ // Scan. The Filename field is always left untouched by the Scanner. If
+ // an error is reported (via Error) and Position is invalid, the scanner is
+ // not inside a token.
+ tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
+func New(src []byte) *Scanner {
+ // even though we accept a src, we read from an io.Reader compatible type
+ // (*bytes.Buffer). So in the future we might easily change it to streaming
+ // read.
+ b := bytes.NewBuffer(src)
+ s := &Scanner{
+ buf: b,
+ src: src,
+ }
+
+ // srcPosition always starts with 1
+ s.srcPos.Line = 1
+ return s
+}
+
+// next reads the next rune from the buffered reader. Returns the rune(0) if
+// an error occurs (or io.EOF is returned).
+func (s *Scanner) next() rune {
+ ch, size, err := s.buf.ReadRune()
+ if err != nil {
+ // advance for error reporting
+ s.srcPos.Column++
+ s.srcPos.Offset += size
+ s.lastCharLen = size
+ return eof
+ }
+
+ // remember last position
+ s.prevPos = s.srcPos
+
+ s.srcPos.Column++
+ s.lastCharLen = size
+ s.srcPos.Offset += size
+
+ if ch == utf8.RuneError && size == 1 {
+ s.err("illegal UTF-8 encoding")
+ return ch
+ }
+
+ if ch == '\n' {
+ s.srcPos.Line++
+ s.lastLineLen = s.srcPos.Column
+ s.srcPos.Column = 0
+ }
+
+ if ch == '\x00' {
+ s.err("unexpected null character (0x00)")
+ return eof
+ }
+
+ if ch == '\uE123' {
+ s.err("unicode code point U+E123 reserved for internal use")
+ return utf8.RuneError
+ }
+
+ // debug
+ // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+ return ch
+}
+
+// unread unreads the previously read rune and updates the source position
+func (s *Scanner) unread() {
+ if err := s.buf.UnreadRune(); err != nil {
+ panic(err) // this is user fault, we should catch it
+ }
+ s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+ peek, _, err := s.buf.ReadRune()
+ if err != nil {
+ return eof
+ }
+
+ s.buf.UnreadRune()
+ return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+ ch := s.next()
+
+ // skip white space
+ for isWhitespace(ch) {
+ ch = s.next()
+ }
+
+ var tok token.Type
+
+ // token text markings
+ s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+ // token position: the initial next() moves the offset by one (the size of the
+ // rune, actually), though we are interested in the starting point
+ s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+ if s.srcPos.Column > 0 {
+ // common case: last character was not a '\n'
+ s.tokPos.Line = s.srcPos.Line
+ s.tokPos.Column = s.srcPos.Column
+ } else {
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ s.tokPos.Line = s.srcPos.Line - 1
+ s.tokPos.Column = s.lastLineLen
+ }
+
+ switch {
+ case isLetter(ch):
+ tok = token.IDENT
+ lit := s.scanIdentifier()
+ if lit == "true" || lit == "false" {
+ tok = token.BOOL
+ }
+ case isDecimal(ch):
+ tok = s.scanNumber(ch)
+ default:
+ switch ch {
+ case eof:
+ tok = token.EOF
+ case '"':
+ tok = token.STRING
+ s.scanString()
+ case '#', '/':
+ tok = token.COMMENT
+ s.scanComment(ch)
+ case '.':
+ tok = token.PERIOD
+ ch = s.peek()
+ if isDecimal(ch) {
+ tok = token.FLOAT
+ ch = s.scanMantissa(ch)
+ ch = s.scanExponent(ch)
+ }
+ case '<':
+ tok = token.HEREDOC
+ s.scanHeredoc()
+ case '[':
+ tok = token.LBRACK
+ case ']':
+ tok = token.RBRACK
+ case '{':
+ tok = token.LBRACE
+ case '}':
+ tok = token.RBRACE
+ case ',':
+ tok = token.COMMA
+ case '=':
+ tok = token.ASSIGN
+ case '+':
+ tok = token.ADD
+ case '-':
+ if isDecimal(s.peek()) {
+ ch := s.next()
+ tok = s.scanNumber(ch)
+ } else {
+ tok = token.SUB
+ }
+ default:
+ s.err("illegal char")
+ }
+ }
+
+ // finish token ending
+ s.tokEnd = s.srcPos.Offset
+
+ // create token literal
+ var tokenText string
+ if s.tokStart >= 0 {
+ tokenText = string(s.src[s.tokStart:s.tokEnd])
+ }
+ s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+ return token.Token{
+ Type: tok,
+ Pos: s.tokPos,
+ Text: tokenText,
+ }
+}
+
+func (s *Scanner) scanComment(ch rune) {
+ // single line comments
+ if ch == '#' || (ch == '/' && s.peek() != '*') {
+ if ch == '/' && s.peek() != '/' {
+ s.err("expected '/' for comment")
+ return
+ }
+
+ ch = s.next()
+ for ch != '\n' && ch >= 0 && ch != eof {
+ ch = s.next()
+ }
+ if ch != eof && ch >= 0 {
+ s.unread()
+ }
+ return
+ }
+
+ // be sure we get the character after /*. This allows us to find comments
+ // that are not terminated
+ if ch == '/' {
+ s.next()
+ ch = s.next() // read character after "/*"
+ }
+
+ // look for /* - style comments
+ for {
+ if ch < 0 || ch == eof {
+ s.err("comment not terminated")
+ break
+ }
+
+ ch0 := ch
+ ch = s.next()
+ if ch0 == '*' && ch == '/' {
+ break
+ }
+ }
+}
+
+// scanNumber scans an HCL number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+ if ch == '0' {
+ // check for hexadecimal, octal or float
+ ch = s.next()
+ if ch == 'x' || ch == 'X' {
+ // hexadecimal
+ ch = s.next()
+ found := false
+ for isHexadecimal(ch) {
+ ch = s.next()
+ found = true
+ }
+
+ if !found {
+ s.err("illegal hexadecimal number")
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+
+ return token.NUMBER
+ }
+
+ // now it's either something like: 0421(octal) or 0.1231(float)
+ illegalOctal := false
+ for isDecimal(ch) {
+ ch = s.next()
+ if ch == '8' || ch == '9' {
+ // this is just a possibility. For example 0159 is illegal, but
+ // 0159.23 is valid. So we mark a possible illegal octal. If
+ // the next character is not a period, we'll print the error.
+ illegalOctal = true
+ }
+ }
+
+ if ch == 'e' || ch == 'E' {
+ ch = s.scanExponent(ch)
+ return token.FLOAT
+ }
+
+ if ch == '.' {
+ ch = s.scanFraction(ch)
+
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ ch = s.scanExponent(ch)
+ }
+ return token.FLOAT
+ }
+
+ if illegalOctal {
+ s.err("illegal octal number")
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+ return token.NUMBER
+ }
+
+ s.scanMantissa(ch)
+ ch = s.next() // seek forward
+ if ch == 'e' || ch == 'E' {
+ ch = s.scanExponent(ch)
+ return token.FLOAT
+ }
+
+ if ch == '.' {
+ ch = s.scanFraction(ch)
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ ch = s.scanExponent(ch)
+ }
+ return token.FLOAT
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+ return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the rune. It returns the next
+// non-decimal rune. It's used to determine whether it's a fraction or exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+ scanned := false
+ for isDecimal(ch) {
+ ch = s.next()
+ scanned = true
+ }
+
+ if scanned && ch != eof {
+ s.unread()
+ }
+ return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+ if ch == '.' {
+ ch = s.peek() // we peek just to see if we can move forward
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ if ch == '-' || ch == '+' {
+ ch = s.next()
+ }
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanHeredoc scans a heredoc string
+func (s *Scanner) scanHeredoc() {
+ // Scan the second '<' in example: '<= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
+ break
+ }
+
+ // Not an anchor match, record the start of a new line
+ lineStart = s.srcPos.Offset
+ }
+
+ if ch == eof {
+ s.err("heredoc not terminated")
+ return
+ }
+ }
+
+ return
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+ braces := 0
+ for {
+ // '"' opening already consumed
+ // read character after quote
+ ch := s.next()
+
+ if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
+ s.err("literal not terminated")
+ return
+ }
+
+ if ch == '"' && braces == 0 {
+ break
+ }
+
+ // If we're going into a ${} then we can ignore quotes for a while
+ if braces == 0 && ch == '$' && s.peek() == '{' {
+ braces++
+ s.next()
+ } else if braces > 0 && ch == '{' {
+ braces++
+ }
+ if braces > 0 && ch == '}' {
+ braces--
+ }
+
+ if ch == '\\' {
+ s.scanEscape()
+ }
+ }
+
+ return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+ // http://en.cppreference.com/w/cpp/language/escape
+ ch := s.next() // read character after '/'
+ switch ch {
+ case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+ // nothing to do
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ // octal notation
+ ch = s.scanDigits(ch, 8, 3)
+ case 'x':
+ // hexadecimal notation
+ ch = s.scanDigits(s.next(), 16, 2)
+ case 'u':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 4)
+ case 'U':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 8)
+ default:
+ s.err("illegal char escape")
+ }
+ return ch
+}
+
+// scanDigits scans a rune with the given base up to n times. For example, an
+// octal notation \184 would be scanned with scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+ start := n
+ for n > 0 && digitVal(ch) < base {
+ ch = s.next()
+ if ch == eof {
+ // If we see an EOF, we halt any more scanning of digits
+ // immediately.
+ break
+ }
+
+ n--
+ }
+ if n > 0 {
+ s.err("illegal char escape")
+ }
+
+ if n != start && ch != eof {
+ // we scanned all digits, put the last non digit char back,
+ // only if we read anything at all
+ s.unread()
+ }
+
+ return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+ offs := s.srcPos.Offset - s.lastCharLen
+ ch := s.next()
+ for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
+ ch = s.next()
+ }
+
+ if ch != eof {
+ s.unread() // we got identifier, put back latest char
+ }
+
+ return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+ pos.Offset = s.srcPos.Offset - s.lastCharLen
+ switch {
+ case s.srcPos.Column > 0:
+ // common case: last character was not a '\n'
+ pos.Line = s.srcPos.Line
+ pos.Column = s.srcPos.Column
+ case s.lastLineLen > 0:
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ pos.Line = s.srcPos.Line - 1
+ pos.Column = s.lastLineLen
+ default:
+ // at the beginning of the source
+ pos.Line = 1
+ pos.Column = 1
+ }
+ return
+}
+
+// err reports a scanning error to the s.Error function. If no Error function
+// is set, the error is printed to os.Stderr by default
+func (s *Scanner) err(msg string) {
+ s.ErrorCount++
+ pos := s.recentPosition()
+
+ if s.Error != nil {
+ s.Error(pos, msg)
+ return
+ }
+
+ fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+ return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal digit
+func isDecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+ return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal or hexadecimal rune
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case 'a' <= ch && ch <= 'f':
+ return int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'F':
+ return int(ch - 'A' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
new file mode 100644
index 000000000..5f981eaa2
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
@@ -0,0 +1,241 @@
+package strconv
+
+import (
+ "errors"
+ "unicode/utf8"
+)
+
+// ErrSyntax indicates that a value does not have the right syntax for the target type.
+var ErrSyntax = errors.New("invalid syntax")
+
+// Unquote interprets s as a single-quoted, double-quoted,
+// or backquoted Go string literal, returning the string value
+// that s quotes. (If s is single-quoted, it would be a Go
+// character literal; Unquote returns the corresponding
+// one-character string.)
+func Unquote(s string) (t string, err error) {
+ n := len(s)
+ if n < 2 {
+ return "", ErrSyntax
+ }
+ quote := s[0]
+ if quote != s[n-1] {
+ return "", ErrSyntax
+ }
+ s = s[1 : n-1]
+
+ if quote != '"' {
+ return "", ErrSyntax
+ }
+ if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') {
+ return "", ErrSyntax
+ }
+
+ // Is it trivial? Avoid allocation.
+ if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {
+ switch quote {
+ case '"':
+ return s, nil
+ case '\'':
+ r, size := utf8.DecodeRuneInString(s)
+ if size == len(s) && (r != utf8.RuneError || size != 1) {
+ return s, nil
+ }
+ }
+ }
+
+ var runeTmp [utf8.UTFMax]byte
+ buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
+ for len(s) > 0 {
+ // If we're starting a '${}' then let it through un-unquoted.
+ // Specifically: we don't unquote any characters within the `${}`
+ // section.
+ if s[0] == '$' && len(s) > 1 && s[1] == '{' {
+ buf = append(buf, '$', '{')
+ s = s[2:]
+
+ // Continue reading until we find the closing brace, copying as-is
+ braces := 1
+ for len(s) > 0 && braces > 0 {
+ r, size := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError {
+ return "", ErrSyntax
+ }
+
+ s = s[size:]
+
+ n := utf8.EncodeRune(runeTmp[:], r)
+ buf = append(buf, runeTmp[:n]...)
+
+ switch r {
+ case '{':
+ braces++
+ case '}':
+ braces--
+ }
+ }
+ if braces != 0 {
+ return "", ErrSyntax
+ }
+ if len(s) == 0 {
+ // If there's no string left, we're done!
+ break
+ } else {
+ // If there's more left, we need to pop back up to the top of the loop
+ // in case there's another interpolation in this string.
+ continue
+ }
+ }
+
+ if s[0] == '\n' {
+ return "", ErrSyntax
+ }
+
+ c, multibyte, ss, err := unquoteChar(s, quote)
+ if err != nil {
+ return "", err
+ }
+ s = ss
+ if c < utf8.RuneSelf || !multibyte {
+ buf = append(buf, byte(c))
+ } else {
+ n := utf8.EncodeRune(runeTmp[:], c)
+ buf = append(buf, runeTmp[:n]...)
+ }
+ if quote == '\'' && len(s) != 0 {
+ // single-quoted must be single character
+ return "", ErrSyntax
+ }
+ }
+ return string(buf), nil
+}
+
+// contains reports whether the string contains the byte c.
+func contains(s string, c byte) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] == c {
+ return true
+ }
+ }
+ return false
+}
+
+func unhex(b byte) (v rune, ok bool) {
+ c := rune(b)
+ switch {
+ case '0' <= c && c <= '9':
+ return c - '0', true
+ case 'a' <= c && c <= 'f':
+ return c - 'a' + 10, true
+ case 'A' <= c && c <= 'F':
+ return c - 'A' + 10, true
+ }
+ return
+}
+
+func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
+ // easy cases
+ switch c := s[0]; {
+ case c == quote && (quote == '\'' || quote == '"'):
+ err = ErrSyntax
+ return
+ case c >= utf8.RuneSelf:
+ r, size := utf8.DecodeRuneInString(s)
+ return r, true, s[size:], nil
+ case c != '\\':
+ return rune(s[0]), false, s[1:], nil
+ }
+
+ // hard case: c is backslash
+ if len(s) <= 1 {
+ err = ErrSyntax
+ return
+ }
+ c := s[1]
+ s = s[2:]
+
+ switch c {
+ case 'a':
+ value = '\a'
+ case 'b':
+ value = '\b'
+ case 'f':
+ value = '\f'
+ case 'n':
+ value = '\n'
+ case 'r':
+ value = '\r'
+ case 't':
+ value = '\t'
+ case 'v':
+ value = '\v'
+ case 'x', 'u', 'U':
+ n := 0
+ switch c {
+ case 'x':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ var v rune
+ if len(s) < n {
+ err = ErrSyntax
+ return
+ }
+ for j := 0; j < n; j++ {
+ x, ok := unhex(s[j])
+ if !ok {
+ err = ErrSyntax
+ return
+ }
+ v = v<<4 | x
+ }
+ s = s[n:]
+ if c == 'x' {
+ // single-byte string, possibly not UTF-8
+ value = v
+ break
+ }
+ if v > utf8.MaxRune {
+ err = ErrSyntax
+ return
+ }
+ value = v
+ multibyte = true
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ v := rune(c) - '0'
+ if len(s) < 2 {
+ err = ErrSyntax
+ return
+ }
+ for j := 0; j < 2; j++ { // one digit already; two more
+ x := rune(s[j]) - '0'
+ if x < 0 || x > 7 {
+ err = ErrSyntax
+ return
+ }
+ v = (v << 3) | x
+ }
+ s = s[2:]
+ if v > 255 {
+ err = ErrSyntax
+ return
+ }
+ value = v
+ case '\\':
+ value = '\\'
+ case '\'', '"':
+ if c != quote {
+ err = ErrSyntax
+ return
+ }
+ value = rune(c)
+ default:
+ err = ErrSyntax
+ return
+ }
+ tail = s
+ return
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go
new file mode 100644
index 000000000..59c1bb72d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+type Pos struct {
+ Filename string // filename, if any
+ Offset int // offset, starting at 0
+ Line int // line number, starting at 1
+ Column int // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+// file:line:column valid position with file name
+// line:column valid position without file name
+// file invalid position with file name
+// - invalid position without file name
+func (p Pos) String() string {
+ s := p.Filename
+ if p.IsValid() {
+ if s != "" {
+ s += ":"
+ }
+ s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+ }
+ if s == "" {
+ s = "-"
+ }
+ return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+ return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
+func (p Pos) After(u Pos) bool {
+ return u.Offset < p.Offset || u.Line < p.Line
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go
new file mode 100644
index 000000000..e37c0664e
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/token/token.go
@@ -0,0 +1,219 @@
+// Package token defines constants representing the lexical tokens for HCL
+// (HashiCorp Configuration Language)
+package token
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
+)
+
+// Token defines a single HCL token which can be obtained via the Scanner
+type Token struct {
+ Type Type
+ Pos Pos
+ Text string
+ JSON bool
+}
+
+// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
+type Type int
+
+const (
+ // Special tokens
+ ILLEGAL Type = iota
+ EOF
+ COMMENT
+
+ identifier_beg
+ IDENT // literals
+ literal_beg
+ NUMBER // 12345
+ FLOAT // 123.45
+ BOOL // true,false
+ STRING // "abc"
+ HEREDOC // < 0 {
+ // Pop the current item
+ n := len(frontier)
+ item := frontier[n-1]
+ frontier = frontier[:n-1]
+
+ switch v := item.Val.(type) {
+ case *ast.ObjectType:
+ items, frontier = flattenObjectType(v, item, items, frontier)
+ case *ast.ListType:
+ items, frontier = flattenListType(v, item, items, frontier)
+ default:
+ items = append(items, item)
+ }
+ }
+
+ // Reverse the list since the frontier model runs things backwards
+ for i := len(items)/2 - 1; i >= 0; i-- {
+ opp := len(items) - 1 - i
+ items[i], items[opp] = items[opp], items[i]
+ }
+
+ // Done! Set the original items
+ list.Items = items
+ return n, true
+ })
+}
+
+func flattenListType(
+ ot *ast.ListType,
+ item *ast.ObjectItem,
+ items []*ast.ObjectItem,
+ frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+ // If the list is empty, keep the original list
+ if len(ot.List) == 0 {
+ items = append(items, item)
+ return items, frontier
+ }
+
+ // All the elements of this object must also be objects!
+ for _, subitem := range ot.List {
+ if _, ok := subitem.(*ast.ObjectType); !ok {
+ items = append(items, item)
+ return items, frontier
+ }
+ }
+
+ // Great! We have a match go through all the items and flatten
+ for _, elem := range ot.List {
+ // Add it to the frontier so that we can recurse
+ frontier = append(frontier, &ast.ObjectItem{
+ Keys: item.Keys,
+ Assign: item.Assign,
+ Val: elem,
+ LeadComment: item.LeadComment,
+ LineComment: item.LineComment,
+ })
+ }
+
+ return items, frontier
+}
+
+func flattenObjectType(
+ ot *ast.ObjectType,
+ item *ast.ObjectItem,
+ items []*ast.ObjectItem,
+ frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+ // If the list has no items we do not have to flatten anything
+ if ot.List.Items == nil {
+ items = append(items, item)
+ return items, frontier
+ }
+
+ // All the elements of this object must also be objects!
+ for _, subitem := range ot.List.Items {
+ if _, ok := subitem.Val.(*ast.ObjectType); !ok {
+ items = append(items, item)
+ return items, frontier
+ }
+ }
+
+ // Great! We have a match go through all the items and flatten
+ for _, subitem := range ot.List.Items {
+ // Copy the new key
+ keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys))
+ copy(keys, item.Keys)
+ copy(keys[len(item.Keys):], subitem.Keys)
+
+ // Add it to the frontier so that we can recurse
+ frontier = append(frontier, &ast.ObjectItem{
+ Keys: keys,
+ Assign: item.Assign,
+ Val: subitem.Val,
+ LeadComment: item.LeadComment,
+ LineComment: item.LineComment,
+ })
+ }
+
+ return items, frontier
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
new file mode 100644
index 000000000..125a5f072
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
@@ -0,0 +1,313 @@
+package parser
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ hcltoken "github.com/hashicorp/hcl/hcl/token"
+ "github.com/hashicorp/hcl/json/scanner"
+ "github.com/hashicorp/hcl/json/token"
+)
+
+type Parser struct {
+ sc *scanner.Scanner
+
+ // Last read token
+ tok token.Token
+ commaPrev token.Token
+
+ enableTrace bool
+ indent int
+ n int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+ return &Parser{
+ sc: scanner.New(src),
+ }
+}
+
+// Parse parses the given source and returns the abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+ p := newParser(src)
+ return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse parses the given source and returns the abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+ f := &ast.File{}
+ var err, scerr error
+ p.sc.Error = func(pos token.Pos, msg string) {
+ scerr = fmt.Errorf("%s: %s", pos, msg)
+ }
+
+ // The root must be an object in JSON
+ object, err := p.object()
+ if scerr != nil {
+ return nil, scerr
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // We make our final node an object list so it is more HCL compatible
+ f.Node = object.List
+
+ // Flatten it, which finds patterns and turns them into more HCL-like
+ // AST trees.
+ flattenObjects(f.Node)
+
+ return f, nil
+}
+
+func (p *Parser) objectList() (*ast.ObjectList, error) {
+ defer un(trace(p, "ParseObjectList"))
+ node := &ast.ObjectList{}
+
+ for {
+ n, err := p.objectItem()
+ if err == errEofToken {
+ break // we are finished
+ }
+
+ // we don't return a nil node, because we might want to use already
+ // collected items.
+ if err != nil {
+ return node, err
+ }
+
+ node.Add(n)
+
+ // Check for a followup comma. If it isn't a comma, then we're done
+ if tok := p.scan(); tok.Type != token.COMMA {
+ break
+ }
+ }
+
+ return node, nil
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+ defer un(trace(p, "ParseObjectItem"))
+
+ keys, err := p.objectKey()
+ if err != nil {
+ return nil, err
+ }
+
+ o := &ast.ObjectItem{
+ Keys: keys,
+ }
+
+ switch p.tok.Type {
+ case token.COLON:
+ pos := p.tok.Pos
+ o.Assign = hcltoken.Pos{
+ Filename: pos.Filename,
+ Offset: pos.Offset,
+ Line: pos.Line,
+ Column: pos.Column,
+ }
+
+ o.Val, err = p.objectValue()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return o, nil
+}
+
+// objectKey parses an object key and returns an ObjectKey AST
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+ keyCount := 0
+ keys := make([]*ast.ObjectKey, 0)
+
+ for {
+ tok := p.scan()
+ switch tok.Type {
+ case token.EOF:
+ return nil, errEofToken
+ case token.STRING:
+ keyCount++
+ keys = append(keys, &ast.ObjectKey{
+ Token: p.tok.HCLToken(),
+ })
+ case token.COLON:
+ // If we have a zero keycount it means that we never got
+ // an object key, i.e. `{ :`. This is a syntax error.
+ if keyCount == 0 {
+ return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+ }
+
+ // Done
+ return keys, nil
+ case token.ILLEGAL:
+ return nil, errors.New("illegal")
+ default:
+ return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+ }
+ }
+}
+
+// objectValue parses any type of object value, such as number, bool, string,
+// object or list.
+func (p *Parser) objectValue() (ast.Node, error) {
+ defer un(trace(p, "ParseObjectValue"))
+ tok := p.scan()
+
+ switch tok.Type {
+ case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING:
+ return p.literalType()
+ case token.LBRACE:
+ return p.objectType()
+ case token.LBRACK:
+ return p.listType()
+ case token.EOF:
+ return nil, errEofToken
+ }
+
+ return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok)
+}
+
+// object parses the root JSON object and returns its ObjectType AST.
+func (p *Parser) object() (*ast.ObjectType, error) {
+ defer un(trace(p, "ParseType"))
+ tok := p.scan()
+
+ switch tok.Type {
+ case token.LBRACE:
+ return p.objectType()
+ case token.EOF:
+ return nil, errEofToken
+ }
+
+ return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok)
+}
+
+// objectType parses an object type and returns an ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+ defer un(trace(p, "ParseObjectType"))
+
+ // we assume that the currently scanned token is a LBRACE
+ o := &ast.ObjectType{}
+
+ l, err := p.objectList()
+
+ // if we hit RBRACE, we are good to go (it means we parsed all items); if it's
+ // not an RBRACE, it's a syntax error and we just return it.
+ if err != nil && p.tok.Type != token.RBRACE {
+ return nil, err
+ }
+
+ o.List = l
+ return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+ defer un(trace(p, "ParseListType"))
+
+ // we assume that the currently scanned token is a LBRACK
+ l := &ast.ListType{}
+
+ for {
+ tok := p.scan()
+ switch tok.Type {
+ case token.NUMBER, token.FLOAT, token.STRING:
+ node, err := p.literalType()
+ if err != nil {
+ return nil, err
+ }
+
+ l.Add(node)
+ case token.COMMA:
+ continue
+ case token.LBRACE:
+ node, err := p.objectType()
+ if err != nil {
+ return nil, err
+ }
+
+ l.Add(node)
+ case token.BOOL:
+ // TODO(arslan) should we support? not supported by HCL yet
+ case token.LBRACK:
+ // TODO(arslan) should we support nested lists? Even though it's
+ // written in README of HCL, it's not a part of the grammar
+ // (not defined in parse.y)
+ case token.RBRACK:
+ // finished
+ return l, nil
+ default:
+ return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type)
+ }
+
+ }
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+ defer un(trace(p, "ParseLiteral"))
+
+ return &ast.LiteralType{
+ Token: p.tok.HCLToken(),
+ }, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead.
+func (p *Parser) scan() token.Token {
+ // If we have a token on the buffer, then return it.
+ if p.n != 0 {
+ p.n = 0
+ return p.tok
+ }
+
+ p.tok = p.sc.Scan()
+ return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+ p.n = 1
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+ if !p.enableTrace {
+ return
+ }
+
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+ i := 2 * p.indent
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+ p.printTrace(msg, "(")
+ p.indent++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+ p.indent--
+ p.printTrace(")")
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
new file mode 100644
index 000000000..fe3f0f095
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
@@ -0,0 +1,451 @@
+package scanner
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/hashicorp/hcl/json/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+ buf *bytes.Buffer // Source buffer for advancing and scanning
+ src []byte // Source buffer for immutable access
+
+ // Source Position
+ srcPos token.Pos // current position
+ prevPos token.Pos // previous position, used for peek() method
+
+ lastCharLen int // length of last character in bytes
+ lastLineLen int // length of last line in characters (for correct column reporting)
+
+ tokStart int // token text start position
+ tokEnd int // token text end position
+
+ // Error is called for each error encountered. If no Error
+ // function is set, the error is reported to os.Stderr.
+ Error func(pos token.Pos, msg string)
+
+ // ErrorCount is incremented by one for each error encountered.
+ ErrorCount int
+
+ // tokPos is the start position of most recently scanned token; set by
+ // Scan. The Filename field is always left untouched by the Scanner. If
+ // an error is reported (via Error) and Position is invalid, the scanner is
+ // not inside a token.
+ tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
+func New(src []byte) *Scanner {
+ // even though we accept a src, we read from an io.Reader compatible type
+ // (*bytes.Buffer). So in the future we might easily change it to streaming
+ // read.
+ b := bytes.NewBuffer(src)
+ s := &Scanner{
+ buf: b,
+ src: src,
+ }
+
+ // srcPosition always starts with 1
+ s.srcPos.Line = 1
+ return s
+}
+
+// next reads the next rune from the buffered reader. Returns the rune(0) if
+// an error occurs (or io.EOF is returned).
+func (s *Scanner) next() rune {
+ ch, size, err := s.buf.ReadRune()
+ if err != nil {
+ // advance for error reporting
+ s.srcPos.Column++
+ s.srcPos.Offset += size
+ s.lastCharLen = size
+ return eof
+ }
+
+ if ch == utf8.RuneError && size == 1 {
+ s.srcPos.Column++
+ s.srcPos.Offset += size
+ s.lastCharLen = size
+ s.err("illegal UTF-8 encoding")
+ return ch
+ }
+
+ // remember last position
+ s.prevPos = s.srcPos
+
+ s.srcPos.Column++
+ s.lastCharLen = size
+ s.srcPos.Offset += size
+
+ if ch == '\n' {
+ s.srcPos.Line++
+ s.lastLineLen = s.srcPos.Column
+ s.srcPos.Column = 0
+ }
+
+ // debug
+ // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+ return ch
+}
+
+// unread unreads the previously read rune and updates the source position
+func (s *Scanner) unread() {
+ if err := s.buf.UnreadRune(); err != nil {
+ panic(err) // this is user fault, we should catch it
+ }
+ s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+ peek, _, err := s.buf.ReadRune()
+ if err != nil {
+ return eof
+ }
+
+ s.buf.UnreadRune()
+ return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+ ch := s.next()
+
+ // skip white space
+ for isWhitespace(ch) {
+ ch = s.next()
+ }
+
+ var tok token.Type
+
+ // token text markings
+ s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+ // token position: the initial next() already moved the offset by one rune,
+ // but we are interested in the starting point
+ s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+ if s.srcPos.Column > 0 {
+ // common case: last character was not a '\n'
+ s.tokPos.Line = s.srcPos.Line
+ s.tokPos.Column = s.srcPos.Column
+ } else {
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ s.tokPos.Line = s.srcPos.Line - 1
+ s.tokPos.Column = s.lastLineLen
+ }
+
+ switch {
+ case isLetter(ch):
+ lit := s.scanIdentifier()
+ if lit == "true" || lit == "false" {
+ tok = token.BOOL
+ } else if lit == "null" {
+ tok = token.NULL
+ } else {
+ s.err("illegal char")
+ }
+ case isDecimal(ch):
+ tok = s.scanNumber(ch)
+ default:
+ switch ch {
+ case eof:
+ tok = token.EOF
+ case '"':
+ tok = token.STRING
+ s.scanString()
+ case '.':
+ tok = token.PERIOD
+ ch = s.peek()
+ if isDecimal(ch) {
+ tok = token.FLOAT
+ ch = s.scanMantissa(ch)
+ ch = s.scanExponent(ch)
+ }
+ case '[':
+ tok = token.LBRACK
+ case ']':
+ tok = token.RBRACK
+ case '{':
+ tok = token.LBRACE
+ case '}':
+ tok = token.RBRACE
+ case ',':
+ tok = token.COMMA
+ case ':':
+ tok = token.COLON
+ case '-':
+ if isDecimal(s.peek()) {
+ ch := s.next()
+ tok = s.scanNumber(ch)
+ } else {
+ s.err("illegal char")
+ }
+ default:
+ s.err("illegal char: " + string(ch))
+ }
+ }
+
+ // finish token ending
+ s.tokEnd = s.srcPos.Offset
+
+ // create token literal
+ var tokenText string
+ if s.tokStart >= 0 {
+ tokenText = string(s.src[s.tokStart:s.tokEnd])
+ }
+ s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+ return token.Token{
+ Type: tok,
+ Pos: s.tokPos,
+ Text: tokenText,
+ }
+}
+
+// scanNumber scans an HCL number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+ zero := ch == '0'
+ pos := s.srcPos
+
+ s.scanMantissa(ch)
+ ch = s.next() // seek forward
+ if ch == 'e' || ch == 'E' {
+ ch = s.scanExponent(ch)
+ return token.FLOAT
+ }
+
+ if ch == '.' {
+ ch = s.scanFraction(ch)
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ ch = s.scanExponent(ch)
+ }
+ return token.FLOAT
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+
+ // If the number has more than one digit and starts with zero, that's an error
+ if zero && pos != s.srcPos {
+ s.err("numbers cannot start with 0")
+ }
+
+ return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the rune. It returns the next
+// non decimal rune. It's used to determine whether it's a fraction or exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+ scanned := false
+ for isDecimal(ch) {
+ ch = s.next()
+ scanned = true
+ }
+
+ if scanned && ch != eof {
+ s.unread()
+ }
+ return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+ if ch == '.' {
+ ch = s.peek() // we peek just to see if we can move forward
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ if ch == '-' || ch == '+' {
+ ch = s.next()
+ }
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+ braces := 0
+ for {
+ // '"' opening already consumed
+ // read character after quote
+ ch := s.next()
+
+ if ch == '\n' || ch < 0 || ch == eof {
+ s.err("literal not terminated")
+ return
+ }
+
+ if ch == '"' {
+ break
+ }
+
+ // If we're going into a ${} then we can ignore quotes for a while
+ if braces == 0 && ch == '$' && s.peek() == '{' {
+ braces++
+ s.next()
+ } else if braces > 0 && ch == '{' {
+ braces++
+ }
+ if braces > 0 && ch == '}' {
+ braces--
+ }
+
+ if ch == '\\' {
+ s.scanEscape()
+ }
+ }
+
+ return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+ // http://en.cppreference.com/w/cpp/language/escape
+ ch := s.next() // read character after '/'
+ switch ch {
+ case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+ // nothing to do
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ // octal notation
+ ch = s.scanDigits(ch, 8, 3)
+ case 'x':
+ // hexadecimal notation
+ ch = s.scanDigits(s.next(), 16, 2)
+ case 'u':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 4)
+ case 'U':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 8)
+ default:
+ s.err("illegal char escape")
+ }
+ return ch
+}
+
+// scanDigits scans a rune with the given base up to n times. For example, an
+// octal notation \184 would be scanned with scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+ for n > 0 && digitVal(ch) < base {
+ ch = s.next()
+ n--
+ }
+ if n > 0 {
+ s.err("illegal char escape")
+ }
+
+ // we scanned all digits, put the last non digit char back
+ s.unread()
+ return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+ offs := s.srcPos.Offset - s.lastCharLen
+ ch := s.next()
+ for isLetter(ch) || isDigit(ch) || ch == '-' {
+ ch = s.next()
+ }
+
+ if ch != eof {
+ s.unread() // we got identifier, put back latest char
+ }
+
+ return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+ pos.Offset = s.srcPos.Offset - s.lastCharLen
+ switch {
+ case s.srcPos.Column > 0:
+ // common case: last character was not a '\n'
+ pos.Line = s.srcPos.Line
+ pos.Column = s.srcPos.Column
+ case s.lastLineLen > 0:
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ pos.Line = s.srcPos.Line - 1
+ pos.Column = s.lastLineLen
+ default:
+ // at the beginning of the source
+ pos.Line = 1
+ pos.Column = 1
+ }
+ return
+}
+
+// err reports a scanning error to the s.Error function. If no Error function
+// is set, the error is printed to os.Stderr by default
+func (s *Scanner) err(msg string) {
+ s.ErrorCount++
+ pos := s.recentPosition()
+
+ if s.Error != nil {
+ s.Error(pos, msg)
+ return
+ }
+
+ fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+ return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal digit
+func isDecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is an hexadecimal number
+func isHexadecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+ return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal or hexadecimal rune
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case 'a' <= ch && ch <= 'f':
+ return int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'F':
+ return int(ch - 'A' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go
new file mode 100644
index 000000000..59c1bb72d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+type Pos struct {
+ Filename string // filename, if any
+ Offset int // offset, starting at 0
+ Line int // line number, starting at 1
+ Column int // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+// file:line:column valid position with file name
+// line:column valid position without file name
+// file invalid position with file name
+// - invalid position without file name
+func (p Pos) String() string {
+ s := p.Filename
+ if p.IsValid() {
+ if s != "" {
+ s += ":"
+ }
+ s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+ }
+ if s == "" {
+ s = "-"
+ }
+ return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+ return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
+func (p Pos) After(u Pos) bool {
+ return u.Offset < p.Offset || u.Line < p.Line
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go
new file mode 100644
index 000000000..95a0c3eee
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/token.go
@@ -0,0 +1,118 @@
+package token
+
+import (
+ "fmt"
+ "strconv"
+
+ hcltoken "github.com/hashicorp/hcl/hcl/token"
+)
+
+// Token defines a single HCL token which can be obtained via the Scanner
+type Token struct {
+ Type Type
+ Pos Pos
+ Text string
+}
+
+// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
+type Type int
+
+const (
+ // Special tokens
+ ILLEGAL Type = iota
+ EOF
+
+ identifier_beg
+ literal_beg
+ NUMBER // 12345
+ FLOAT // 123.45
+ BOOL // true,false
+ STRING // "abc"
+ NULL // null
+ literal_end
+ identifier_end
+
+ operator_beg
+ LBRACK // [
+ LBRACE // {
+ COMMA // ,
+ PERIOD // .
+ COLON // :
+
+ RBRACK // ]
+ RBRACE // }
+
+ operator_end
+)
+
+var tokens = [...]string{
+ ILLEGAL: "ILLEGAL",
+
+ EOF: "EOF",
+
+ NUMBER: "NUMBER",
+ FLOAT: "FLOAT",
+ BOOL: "BOOL",
+ STRING: "STRING",
+ NULL: "NULL",
+
+ LBRACK: "LBRACK",
+ LBRACE: "LBRACE",
+ COMMA: "COMMA",
+ PERIOD: "PERIOD",
+ COLON: "COLON",
+
+ RBRACK: "RBRACK",
+ RBRACE: "RBRACE",
+}
+
+// String returns the string corresponding to the token tok.
+func (t Type) String() string {
+ s := ""
+ if 0 <= t && t < Type(len(tokens)) {
+ s = tokens[t]
+ }
+ if s == "" {
+ s = "token(" + strconv.Itoa(int(t)) + ")"
+ }
+ return s
+}
+
+// IsIdentifier returns true for tokens corresponding to identifiers and basic
+// type literals; it returns false otherwise.
+func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
+
+// IsLiteral returns true for tokens corresponding to basic type literals; it
+// returns false otherwise.
+func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
+
+// String returns the token's literal text. Note that this is only
+// applicable for certain token types, such as token.IDENT,
+// token.STRING, etc.
+func (t Token) String() string {
+ return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
+}
+
+// HCLToken converts this token to an HCL token.
+//
+// The token type must be a literal type or this will panic.
+func (t Token) HCLToken() hcltoken.Token {
+ switch t.Type {
+ case BOOL:
+ return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
+ case FLOAT:
+ return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
+ case NULL:
+ return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
+ case NUMBER:
+ return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
+ case STRING:
+ return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
+ default:
+ panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
+ }
+}
diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go
new file mode 100644
index 000000000..d9993c292
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/lex.go
@@ -0,0 +1,38 @@
+package hcl
+
+import (
+ "unicode"
+ "unicode/utf8"
+)
+
+type lexModeValue byte
+
+const (
+ lexModeUnknown lexModeValue = iota
+ lexModeHcl
+ lexModeJson
+)
+
+// lexMode returns whether we're going to be parsing in JSON
+// mode or HCL mode.
+func lexMode(v []byte) lexModeValue {
+ var (
+ r rune
+ w int
+ offset int
+ )
+
+ for {
+ r, w = utf8.DecodeRune(v[offset:])
+ offset += w
+ if unicode.IsSpace(r) {
+ continue
+ }
+ if r == '{' {
+ return lexModeJson
+ }
+ break
+ }
+
+ return lexModeHcl
+}
diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go
new file mode 100644
index 000000000..1fca53c4c
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/parse.go
@@ -0,0 +1,39 @@
+package hcl
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ hclParser "github.com/hashicorp/hcl/hcl/parser"
+ jsonParser "github.com/hashicorp/hcl/json/parser"
+)
+
+// ParseBytes accepts a byte slice as input and returns the AST tree.
+//
+// Input can be either JSON or HCL
+func ParseBytes(in []byte) (*ast.File, error) {
+ return parse(in)
+}
+
+// ParseString accepts a string as input and returns the AST tree.
+func ParseString(input string) (*ast.File, error) {
+ return parse([]byte(input))
+}
+
+func parse(in []byte) (*ast.File, error) {
+ switch lexMode(in) {
+ case lexModeHcl:
+ return hclParser.Parse(in)
+ case lexModeJson:
+ return jsonParser.Parse(in)
+ }
+
+ return nil, fmt.Errorf("unknown config format")
+}
+
+// Parse parses the given input and returns the root object.
+//
+// The input format can be either HCL or JSON.
+func Parse(input string) (*ast.File, error) {
+ return parse([]byte(input))
+}
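
As an illustrative aside (not part of the vendored file): a minimal sketch of calling the entry points defined above, assuming only the Parse/ParseString signatures shown in this file.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

func main() {
	// lexMode (see lex.go above) picks the HCL or JSON parser based on
	// whether the input starts with '{'; this input is plain HCL.
	src := `port = 8080
name = "example"`

	file, err := hcl.ParseString(src)
	if err != nil {
		log.Fatalf("parse failed: %v", err)
	}

	// The returned *ast.File holds the root node of the parsed config.
	fmt.Printf("parsed root node: %T\n", file.Node)
}
```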
diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE
new file mode 100644
index 000000000..5f0d1fb6a
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2014 Alan Shreve
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md
new file mode 100644
index 000000000..7a950d177
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/README.md
@@ -0,0 +1,23 @@
+# mousetrap
+
+mousetrap is a tiny library that answers a single question.
+
+On a Windows machine, was the process invoked by someone double clicking on
+the executable file while browsing in explorer?
+
+### Motivation
+
+Windows developers unfamiliar with command line tools will often "double-click"
+the executable for a tool. Because most CLI tools print the help and then exit
+when invoked without arguments, this is often very frustrating for those users.
+
+mousetrap provides a way to detect these invocations so that you can provide
+more helpful behavior and instructions on how to run the CLI tool. To see what
+this looks like, both from an organizational and a technical perspective, see
+https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/
+
+### The interface
+
+The library exposes a single interface:
+
+ func StartedByExplorer() (bool)
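
As an illustrative aside (not part of the vendored README), this is roughly how a CLI entry point might use that function; the message and delay are invented for the example.

```go
package main

import (
	"fmt"
	"time"

	"github.com/inconshreveable/mousetrap"
)

func main() {
	// If the binary was double-clicked from Explorer, the console window
	// would close immediately on exit, so pause long enough to be read.
	if mousetrap.StartedByExplorer() {
		fmt.Println("This is a command line tool; please run it from a terminal.")
		time.Sleep(5 * time.Second)
		return
	}

	// ... normal CLI startup ...
}
```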
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
new file mode 100644
index 000000000..9d2d8a4ba
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
@@ -0,0 +1,15 @@
+// +build !windows
+
+package mousetrap
+
+// StartedByExplorer returns true if the program was invoked by the user
+// double-clicking on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It only can tell you
+// whether it was launched from explorer.exe
+//
+// On non-Windows platforms, it always returns false.
+func StartedByExplorer() bool {
+ return false
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
new file mode 100644
index 000000000..336142a5e
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
@@ -0,0 +1,98 @@
+// +build windows
+// +build !go1.4
+
+package mousetrap
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+const (
+ // defined by the Win32 API
+ th32cs_snapprocess uintptr = 0x2
+)
+
+var (
+ kernel = syscall.MustLoadDLL("kernel32.dll")
+ CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot")
+ Process32First = kernel.MustFindProc("Process32FirstW")
+ Process32Next = kernel.MustFindProc("Process32NextW")
+)
+
+// ProcessEntry32 structure defined by the Win32 API
+type processEntry32 struct {
+ dwSize uint32
+ cntUsage uint32
+ th32ProcessID uint32
+ th32DefaultHeapID int
+ th32ModuleID uint32
+ cntThreads uint32
+ th32ParentProcessID uint32
+ pcPriClassBase int32
+ dwFlags uint32
+ szExeFile [syscall.MAX_PATH]uint16
+}
+
+func getProcessEntry(pid int) (pe *processEntry32, err error) {
+ snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0))
+ if snapshot == uintptr(syscall.InvalidHandle) {
+ err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1)
+ return
+ }
+ defer syscall.CloseHandle(syscall.Handle(snapshot))
+
+ var processEntry processEntry32
+ processEntry.dwSize = uint32(unsafe.Sizeof(processEntry))
+ ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
+ if ok == 0 {
+ err = fmt.Errorf("Process32First: %v", e1)
+ return
+ }
+
+ for {
+ if processEntry.th32ProcessID == uint32(pid) {
+ pe = &processEntry
+ return
+ }
+
+ ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
+ if ok == 0 {
+ err = fmt.Errorf("Process32Next: %v", e1)
+ return
+ }
+ }
+}
+
+func getppid() (pid int, err error) {
+ pe, err := getProcessEntry(os.Getpid())
+ if err != nil {
+ return
+ }
+
+ pid = int(pe.th32ParentProcessID)
+ return
+}
+
+// StartedByExplorer returns true if the program was invoked by the user double-clicking
+// on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It only can tell you
+// whether it was launched from explorer.exe
+func StartedByExplorer() bool {
+ ppid, err := getppid()
+ if err != nil {
+ return false
+ }
+
+ pe, err := getProcessEntry(ppid)
+ if err != nil {
+ return false
+ }
+
+ name := syscall.UTF16ToString(pe.szExeFile[:])
+ return name == "explorer.exe"
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
new file mode 100644
index 000000000..9a28e57c3
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
@@ -0,0 +1,46 @@
+// +build windows
+// +build go1.4
+
+package mousetrap
+
+import (
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) {
+ snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.CloseHandle(snapshot)
+ var procEntry syscall.ProcessEntry32
+ procEntry.Size = uint32(unsafe.Sizeof(procEntry))
+ if err = syscall.Process32First(snapshot, &procEntry); err != nil {
+ return nil, err
+ }
+ for {
+ if procEntry.ProcessID == uint32(pid) {
+ return &procEntry, nil
+ }
+ err = syscall.Process32Next(snapshot, &procEntry)
+ if err != nil {
+ return nil, err
+ }
+ }
+}
+
+// StartedByExplorer returns true if the program was invoked by the user double-clicking
+// on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It only can tell you
+// whether it was launched from explorer.exe
+func StartedByExplorer() bool {
+ pe, err := getProcessEntry(os.Getppid())
+ if err != nil {
+ return false
+ }
+ return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:])
+}
diff --git a/vendor/github.com/magiconair/properties/.gitignore b/vendor/github.com/magiconair/properties/.gitignore
new file mode 100644
index 000000000..e7081ff52
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/.gitignore
@@ -0,0 +1,6 @@
+*.sublime-project
+*.sublime-workspace
+*.un~
+*.swp
+.idea/
+*.iml
diff --git a/vendor/github.com/magiconair/properties/.travis.yml b/vendor/github.com/magiconair/properties/.travis.yml
new file mode 100644
index 000000000..baf9031df
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+go:
+ - 1.3.x
+ - 1.4.x
+ - 1.5.x
+ - 1.6.x
+ - 1.7.x
+ - 1.8.x
+ - 1.9.x
+ - "1.10.x"
+ - "1.11.x"
+ - "1.12.x"
+ - "1.13.x"
+ - "1.14.x"
+ - "1.15.x"
+ - "1.16.x"
+ - tip
diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md
new file mode 100644
index 000000000..ff8d02535
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/CHANGELOG.md
@@ -0,0 +1,160 @@
+## Changelog
+
+### [1.8.2](https://github.com/magiconair/properties/tree/v1.8.2) - 25 Aug 2020
+
+ * [PR #36](https://github.com/magiconair/properties/pull/36): Escape backslash on write
+
+ This patch ensures that backslashes are escaped on write. Existing applications which
+ rely on the old behavior may need to be updated.
+
+ Thanks to [@apesternikov](https://github.com/apesternikov) for the patch.
+
+ * [PR #42](https://github.com/magiconair/properties/pull/42): Made Content-Type check whitespace agnostic in LoadURL()
+
+ Thanks to [@aliras1](https://github.com/aliras1) for the patch.
+
+ * [PR #41](https://github.com/magiconair/properties/pull/41): Make key/value separator configurable on Write()
+
+ Thanks to [@mkjor](https://github.com/mkjor) for the patch.
+
+ * [PR #40](https://github.com/magiconair/properties/pull/40): Add method to return a sorted list of keys
+
+ Thanks to [@mkjor](https://github.com/mkjor) for the patch.
+
+### [1.8.1](https://github.com/magiconair/properties/tree/v1.8.1) - 10 May 2019
+
+ * [PR #35](https://github.com/magiconair/properties/pull/35): Close body always after request
+
+ This patch ensures that in `LoadURL` the response body is always closed.
+
+ Thanks to [@liubog2008](https://github.com/liubog2008) for the patch.
+
+### [1.8](https://github.com/magiconair/properties/tree/v1.8) - 15 May 2018
+
+ * [PR #26](https://github.com/magiconair/properties/pull/26): Disable expansion during loading
+
+ This adds the option to disable property expansion during loading.
+
+ Thanks to [@kmala](https://github.com/kmala) for the patch.
+
+### [1.7.6](https://github.com/magiconair/properties/tree/v1.7.6) - 14 Feb 2018
+
+ * [PR #29](https://github.com/magiconair/properties/pull/29): Reworked expansion logic to handle more complex cases.
+
+ See PR for an example.
+
+ Thanks to [@yobert](https://github.com/yobert) for the fix.
+
+### [1.7.5](https://github.com/magiconair/properties/tree/v1.7.5) - 13 Feb 2018
+
+ * [PR #28](https://github.com/magiconair/properties/pull/28): Support duplicate expansions in the same value
+
+ Values which expand the same key multiple times (e.g. `key=${a} ${a}`) will no longer fail
+ with a `circular reference error`.
+
+ Thanks to [@yobert](https://github.com/yobert) for the fix.
+
+### [1.7.4](https://github.com/magiconair/properties/tree/v1.7.4) - 31 Oct 2017
+
+ * [Issue #23](https://github.com/magiconair/properties/issues/23): Ignore blank lines with whitespaces
+
+ * [PR #24](https://github.com/magiconair/properties/pull/24): Update keys when DisableExpansion is enabled
+
+ Thanks to [@mgurov](https://github.com/mgurov) for the fix.
+
+### [1.7.3](https://github.com/magiconair/properties/tree/v1.7.3) - 10 Jul 2017
+
+ * [Issue #17](https://github.com/magiconair/properties/issues/17): Add [SetValue()](http://godoc.org/github.com/magiconair/properties#Properties.SetValue) method to set values generically
+ * [Issue #22](https://github.com/magiconair/properties/issues/22): Add [LoadMap()](http://godoc.org/github.com/magiconair/properties#LoadMap) function to load properties from a string map
+
+### [1.7.2](https://github.com/magiconair/properties/tree/v1.7.2) - 20 Mar 2017
+
+ * [Issue #15](https://github.com/magiconair/properties/issues/15): Drop gocheck dependency
+ * [PR #21](https://github.com/magiconair/properties/pull/21): Add [Map()](http://godoc.org/github.com/magiconair/properties#Properties.Map) and [FilterFunc()](http://godoc.org/github.com/magiconair/properties#Properties.FilterFunc)
+
+### [1.7.1](https://github.com/magiconair/properties/tree/v1.7.1) - 13 Jan 2017
+
+ * [Issue #14](https://github.com/magiconair/properties/issues/14): Decouple TestLoadExpandedFile from `$USER`
+ * [PR #12](https://github.com/magiconair/properties/pull/12): Load from files and URLs
+ * [PR #16](https://github.com/magiconair/properties/pull/16): Keep gofmt happy
+ * [PR #18](https://github.com/magiconair/properties/pull/18): Fix Delete() function
+
+### [1.7.0](https://github.com/magiconair/properties/tree/v1.7.0) - 20 Mar 2016
+
+ * [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#LoadURL) method to load properties from a URL.
+ * [Issue #11](https://github.com/magiconair/properties/issues/11): Add [LoadString,MustLoadString](http://godoc.org/github.com/magiconair/properties#LoadString) method to load properties from an UTF8 string.
+ * [PR #8](https://github.com/magiconair/properties/pull/8): Add [MustFlag](http://godoc.org/github.com/magiconair/properties#Properties.MustFlag) method to provide overrides via command line flags. (@pascaldekloe)
+
+### [1.6.0](https://github.com/magiconair/properties/tree/v1.6.0) - 11 Dec 2015
+
+ * Add [Decode](http://godoc.org/github.com/magiconair/properties#Properties.Decode) method to populate struct from properties via tags.
+
+### [1.5.6](https://github.com/magiconair/properties/tree/v1.5.6) - 18 Oct 2015
+
+ * Vendored in gopkg.in/check.v1
+
+### [1.5.5](https://github.com/magiconair/properties/tree/v1.5.5) - 31 Jul 2015
+
+ * [PR #6](https://github.com/magiconair/properties/pull/6): Add [Delete](http://godoc.org/github.com/magiconair/properties#Properties.Delete) method to remove keys including comments. (@gerbenjacobs)
+
+### [1.5.4](https://github.com/magiconair/properties/tree/v1.5.4) - 23 Jun 2015
+
+ * [Issue #5](https://github.com/magiconair/properties/issues/5): Allow disabling of property expansion [DisableExpansion](http://godoc.org/github.com/magiconair/properties#Properties.DisableExpansion). When property expansion is disabled Properties become a simple key/value store and don't check for circular references.
+
+### [1.5.3](https://github.com/magiconair/properties/tree/v1.5.3) - 02 Jun 2015
+
+ * [Issue #4](https://github.com/magiconair/properties/issues/4): Maintain key order in [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) and [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp)
+
+### [1.5.2](https://github.com/magiconair/properties/tree/v1.5.2) - 10 Apr 2015
+
+ * [Issue #3](https://github.com/magiconair/properties/issues/3): Don't print comments in [WriteComment()](http://godoc.org/github.com/magiconair/properties#Properties.WriteComment) if they are all empty
+ * Add clickable links to README
+
+### [1.5.1](https://github.com/magiconair/properties/tree/v1.5.1) - 08 Dec 2014
+
+ * Added [GetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.GetParsedDuration) and [MustGetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.MustGetParsedDuration) for values specified compatible with
+ [time.ParseDuration()](http://golang.org/pkg/time/#ParseDuration).
+
+### [1.5.0](https://github.com/magiconair/properties/tree/v1.5.0) - 18 Nov 2014
+
+ * Added support for single and multi-line comments (reading, writing and updating)
+ * The order of keys is now preserved
+ * Calling [Set()](http://godoc.org/github.com/magiconair/properties#Properties.Set) with an empty key now silently ignores the call and does not create a new entry
+ * Added a [MustSet()](http://godoc.org/github.com/magiconair/properties#Properties.MustSet) method
+ * Migrated test library from launchpad.net/gocheck to [gopkg.in/check.v1](http://gopkg.in/check.v1)
+
+### [1.4.2](https://github.com/magiconair/properties/tree/v1.4.2) - 15 Nov 2014
+
+ * [Issue #2](https://github.com/magiconair/properties/issues/2): Fixed goroutine leak in parser which created two lexers but cleaned up only one
+
+### [1.4.1](https://github.com/magiconair/properties/tree/v1.4.1) - 13 Nov 2014
+
+ * [Issue #1](https://github.com/magiconair/properties/issues/1): Fixed bug in Keys() method which returned an empty string
+
+### [1.4.0](https://github.com/magiconair/properties/tree/v1.4.0) - 23 Sep 2014
+
+ * Added [Keys()](http://godoc.org/github.com/magiconair/properties#Properties.Keys) to get the keys
+ * Added [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) and [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) to get a subset of the properties
+
+### [1.3.0](https://github.com/magiconair/properties/tree/v1.3.0) - 18 Mar 2014
+
+* Added support for time.Duration
+* Made MustXXX() failure behavior configurable (log.Fatal, panic, custom)
+* Changed default of MustXXX() failure from panic to log.Fatal
+
+### [1.2.0](https://github.com/magiconair/properties/tree/v1.2.0) - 05 Mar 2014
+
+* Added MustGet... functions
+* Added support for int and uint with range checks on 32 bit platforms
+
+### [1.1.0](https://github.com/magiconair/properties/tree/v1.1.0) - 20 Jan 2014
+
+* Renamed from goproperties to properties
+* Added support for expansion of environment vars in
+ filenames and value expressions
+* Fixed bug where value expressions were not at the
+ start of the string
+
+### [1.0.0](https://github.com/magiconair/properties/tree/v1.0.0) - 7 Jan 2014
+
+* Initial release
diff --git a/vendor/github.com/magiconair/properties/LICENSE.md b/vendor/github.com/magiconair/properties/LICENSE.md
new file mode 100644
index 000000000..79c87e3e6
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/LICENSE.md
@@ -0,0 +1,24 @@
+Copyright (c) 2013-2020, Frank Schroeder
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/magiconair/properties/README.md b/vendor/github.com/magiconair/properties/README.md
new file mode 100644
index 000000000..e2edda025
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/README.md
@@ -0,0 +1,128 @@
+[![](https://img.shields.io/github/tag/magiconair/properties.svg?style=flat-square&label=release)](https://github.com/magiconair/properties/releases)
+[![Travis CI Status](https://img.shields.io/travis/magiconair/properties.svg?branch=master&style=flat-square&label=travis)](https://travis-ci.org/magiconair/properties)
+[![License](https://img.shields.io/badge/License-BSD%202--Clause-orange.svg?style=flat-square)](https://raw.githubusercontent.com/magiconair/properties/master/LICENSE)
+[![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties)
+
+# Overview
+
+#### Please run `git pull --tags` to update the tags. See [below](#updated-git-tags) for why.
+
+properties is a Go library for reading and writing properties files.
+
+It supports reading from multiple files or URLs and Spring style recursive
+property expansion of expressions like `${key}` to their corresponding value.
+Value expressions can refer to other keys like in `${key}` or to environment
+variables like in `${USER}`. Filenames can also contain environment variables
+like in `/home/${USER}/myapp.properties`.
+
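+As a minimal sketch of recursive expansion (the key names here are made up),
+`MustLoadString` and `MustGetString` can be combined like this:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/magiconair/properties"
+)
+
+func main() {
+ // "greeting" refers to "name" and is expanded recursively on Get.
+ p := properties.MustLoadString("name=app\ngreeting=hello ${name}")
+ fmt.Println(p.MustGetString("greeting")) // hello app
+}
+```
+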
+Properties can be decoded into structs, maps, arrays and values through
+struct tags.
+
+Comments and the order of keys are preserved. Comments can be modified
+and can be written to the output.
+
+The properties library supports both ISO-8859-1 and UTF-8 encoded data.
+
+Starting from version 1.3.0 the behavior of the MustXXX() functions is
+configurable by providing a custom `ErrorHandler` function. The default has
+changed from `panic` to `log.Fatal`, but custom error handling functions
+can be provided. See the package documentation for details.
+
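+For instance (a brief sketch; the missing key name below is arbitrary),
+switching the failure behavior to a panic looks like this:
+
+```go
+package main
+
+import (
+ "log"
+
+ "github.com/magiconair/properties"
+)
+
+func main() {
+ // Make the MustXXX() functions panic instead of calling log.Fatal.
+ properties.ErrorHandler = properties.PanicHandler
+
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("recovered: %v", r)
+ }
+ }()
+
+ p := properties.MustLoadString("key=value")
+ _ = p.MustGetString("missing") // panics; recovered above
+}
+```
+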
+Read the full documentation on [![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties)
+
+## Getting Started
+
+```go
+import (
+ "flag"
+ "log"
+ "time"
+
+ "github.com/magiconair/properties"
+)
+
+func main() {
+ // init from a file
+ p := properties.MustLoadFile("${HOME}/config.properties", properties.UTF8)
+
+ // or multiple files
+ p = properties.MustLoadFiles([]string{
+ "${HOME}/config.properties",
+ "${HOME}/config-${USER}.properties",
+ }, properties.UTF8, true)
+
+ // or from a map
+ p = properties.LoadMap(map[string]string{"key": "value", "abc": "def"})
+
+ // or from a string
+ p = properties.MustLoadString("key=value\nabc=def")
+
+ // or from a URL
+ p = properties.MustLoadURL("http://host/path")
+
+ // or from multiple URLs
+ p = properties.MustLoadURLs([]string{
+ "http://host/config",
+ "http://host/config-${USER}",
+ }, true)
+
+ // or from flags
+ p.MustFlag(flag.CommandLine)
+
+ // get values through getters
+ host := p.MustGetString("host")
+ port := p.GetInt("port", 8080)
+
+ // or through Decode
+ type Config struct {
+ Host string `properties:"host"`
+ Port int `properties:"port,default=9000"`
+ Accept []string `properties:"accept,default=image/png;image/gif"`
+ Timeout time.Duration `properties:"timeout,default=5s"`
+ }
+ var cfg Config
+ if err := p.Decode(&cfg); err != nil {
+ log.Fatal(err)
+ }
+}
+
+```
+
+## Installation and Upgrade
+
+```
+$ go get -u github.com/magiconair/properties
+```
+
+## License
+
+2 clause BSD license. See [LICENSE](https://github.com/magiconair/properties/blob/master/LICENSE) file for details.
+
+## ToDo
+
+* Dump contents with passwords and secrets obscured
+
+## Updated Git tags
+
+#### 13 Feb 2018
+
+I realized that all of the git tags I had pushed before v1.7.5 were lightweight tags
+and I've only recently learned that this doesn't play well with `git describe` 😞
+
+I have replaced all lightweight tags with signed tags using this script which should
+retain the commit date, name and email address. Please run `git pull --tags` to update them.
+
+Worst case you have to reclone the repo.
+
+```shell
+#!/bin/bash
+tag=$1
+echo "Updating $tag"
+date=$(git show ${tag}^0 --format=%aD | head -1)
+email=$(git show ${tag}^0 --format=%aE | head -1)
+name=$(git show ${tag}^0 --format=%aN | head -1)
+GIT_COMMITTER_DATE="$date" GIT_COMMITTER_NAME="$name" GIT_COMMITTER_EMAIL="$email" git tag -s -f ${tag} ${tag}^0 -m ${tag}
+```
+
+I apologize for the inconvenience.
+
+Frank
+
diff --git a/vendor/github.com/magiconair/properties/decode.go b/vendor/github.com/magiconair/properties/decode.go
new file mode 100644
index 000000000..3ebf8049c
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/decode.go
@@ -0,0 +1,289 @@
+// Copyright 2018 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Decode assigns property values to exported fields of a struct.
+//
+// Decode traverses x recursively and returns an error if a value cannot be
+// converted to the field type or a required value is missing for a field.
+//
+// The following type dependent decodings are used:
+//
+// String, boolean, numeric fields have the value of the property key assigned.
+// The property key name is the name of the field. A different key and a default
+// value can be set in the field's tag. Fields without default value are
+// required. If the value cannot be converted to the field type an error is
+// returned.
+//
+// time.Duration fields have the result of time.ParseDuration() assigned.
+//
+// time.Time fields have the value of time.Parse() assigned. The default layout
+// is time.RFC3339 but can be set in the field's tag.
+//
+// Arrays and slices of string, boolean, numeric, time.Duration and time.Time
+// fields have the value interpreted as a comma separated list of values. The
+// individual values are trimmed of whitespace and empty values are ignored. A
+// default value can be provided as a semicolon separated list in the field's
+// tag.
+//
+// Struct fields are decoded recursively using the field name plus "." as
+// prefix. The prefix (without dot) can be overridden in the field's tag.
+// Default values are not supported in the field's tag. Specify them on the
+// fields of the inner struct instead.
+//
+// Map fields must have a key of type string and are decoded recursively by
+// using the field's name plus "." as prefix and the next element of the key
+// name as map key. The prefix (without dot) can be overridden in the field's
+// tag. Default values are not supported.
+//
+// Examples:
+//
+// // Field is ignored.
+// Field int `properties:"-"`
+//
+// // Field is assigned value of 'Field'.
+// Field int
+//
+// // Field is assigned value of 'myName'.
+// Field int `properties:"myName"`
+//
+// // Field is assigned value of key 'myName' and has a default
+// // value 15 if the key does not exist.
+// Field int `properties:"myName,default=15"`
+//
+// // Field is assigned value of key 'Field' and has a default
+// // value 15 if the key does not exist.
+// Field int `properties:",default=15"`
+//
+// // Field is assigned value of key 'date' and the date
+// // is in format 2006-01-02
+// Field time.Time `properties:"date,layout=2006-01-02"`
+//
+// // Field is assigned the non-empty and whitespace trimmed
+// // values of key 'Field' split by commas.
+// Field []string
+//
+// // Field is assigned the non-empty and whitespace trimmed
+// // values of key 'Field' split by commas and has a default
+// // value ["a", "b", "c"] if the key does not exist.
+// Field []string `properties:",default=a;b;c"`
+//
+// // Field is decoded recursively with "Field." as key prefix.
+// Field SomeStruct
+//
+// // Field is decoded recursively with "myName." as key prefix.
+// Field SomeStruct `properties:"myName"`
+//
+// // Field is decoded recursively with "Field." as key prefix
+// // and the next dotted element of the key as map key.
+// Field map[string]string
+//
+// // Field is decoded recursively with "myName." as key prefix
+// // and the next dotted element of the key as map key.
+// Field map[string]string `properties:"myName"`
+func (p *Properties) Decode(x interface{}) error {
+ t, v := reflect.TypeOf(x), reflect.ValueOf(x)
+ if t.Kind() != reflect.Ptr || v.Elem().Type().Kind() != reflect.Struct {
+ return fmt.Errorf("not a pointer to struct: %s", t)
+ }
+ if err := dec(p, "", nil, nil, v); err != nil {
+ return err
+ }
+ return nil
+}
+
+func dec(p *Properties, key string, def *string, opts map[string]string, v reflect.Value) error {
+ t := v.Type()
+
+ // value returns the property value for key or the default if provided.
+ value := func() (string, error) {
+ if val, ok := p.Get(key); ok {
+ return val, nil
+ }
+ if def != nil {
+ return *def, nil
+ }
+ return "", fmt.Errorf("missing required key %s", key)
+ }
+
+ // conv converts a string to a value of the given type.
+ conv := func(s string, t reflect.Type) (val reflect.Value, err error) {
+ var v interface{}
+
+ switch {
+ case isDuration(t):
+ v, err = time.ParseDuration(s)
+
+ case isTime(t):
+ layout := opts["layout"]
+ if layout == "" {
+ layout = time.RFC3339
+ }
+ v, err = time.Parse(layout, s)
+
+ case isBool(t):
+ v, err = boolVal(s), nil
+
+ case isString(t):
+ v, err = s, nil
+
+ case isFloat(t):
+ v, err = strconv.ParseFloat(s, 64)
+
+ case isInt(t):
+ v, err = strconv.ParseInt(s, 10, 64)
+
+ case isUint(t):
+ v, err = strconv.ParseUint(s, 10, 64)
+
+ default:
+ return reflect.Zero(t), fmt.Errorf("unsupported type %s", t)
+ }
+ if err != nil {
+ return reflect.Zero(t), err
+ }
+ return reflect.ValueOf(v).Convert(t), nil
+ }
+
+ // keydef returns the property key, the default value and the tag options
+ // based on the name of the struct field and its tag.
+ keydef := func(f reflect.StructField) (string, *string, map[string]string) {
+ _key, _opts := parseTag(f.Tag.Get("properties"))
+
+ var _def *string
+ if d, ok := _opts["default"]; ok {
+ _def = &d
+ }
+ if _key != "" {
+ return _key, _def, _opts
+ }
+ return f.Name, _def, _opts
+ }
+
+ switch {
+ case isDuration(t) || isTime(t) || isBool(t) || isString(t) || isFloat(t) || isInt(t) || isUint(t):
+ s, err := value()
+ if err != nil {
+ return err
+ }
+ val, err := conv(s, t)
+ if err != nil {
+ return err
+ }
+ v.Set(val)
+
+ case isPtr(t):
+ return dec(p, key, def, opts, v.Elem())
+
+ case isStruct(t):
+ for i := 0; i < v.NumField(); i++ {
+ fv := v.Field(i)
+ fk, def, opts := keydef(t.Field(i))
+ if !fv.CanSet() {
+ return fmt.Errorf("cannot set %s", t.Field(i).Name)
+ }
+ if fk == "-" {
+ continue
+ }
+ if key != "" {
+ fk = key + "." + fk
+ }
+ if err := dec(p, fk, def, opts, fv); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ case isArray(t):
+ val, err := value()
+ if err != nil {
+ return err
+ }
+ vals := split(val, ";")
+ a := reflect.MakeSlice(t, 0, len(vals))
+ for _, s := range vals {
+ val, err := conv(s, t.Elem())
+ if err != nil {
+ return err
+ }
+ a = reflect.Append(a, val)
+ }
+ v.Set(a)
+
+ case isMap(t):
+ valT := t.Elem()
+ m := reflect.MakeMap(t)
+ for postfix := range p.FilterStripPrefix(key + ".").m {
+ pp := strings.SplitN(postfix, ".", 2)
+ mk, mv := pp[0], reflect.New(valT)
+ if err := dec(p, key+"."+mk, nil, nil, mv); err != nil {
+ return err
+ }
+ m.SetMapIndex(reflect.ValueOf(mk), mv.Elem())
+ }
+ v.Set(m)
+
+ default:
+ return fmt.Errorf("unsupported type %s", t)
+ }
+ return nil
+}
+
+// split splits a string on sep, trims whitespace of elements
+// and omits empty elements
+func split(s string, sep string) []string {
+ var a []string
+ for _, v := range strings.Split(s, sep) {
+ if v = strings.TrimSpace(v); v != "" {
+ a = append(a, v)
+ }
+ }
+ return a
+}
+
+// parseTag parses a tag of the form "key,k=v,k=v,..." into a key and an options map.
+func parseTag(tag string) (key string, opts map[string]string) {
+ opts = map[string]string{}
+ for i, s := range strings.Split(tag, ",") {
+ if i == 0 {
+ key = s
+ continue
+ }
+
+ pp := strings.SplitN(s, "=", 2)
+ if len(pp) == 1 {
+ opts[pp[0]] = ""
+ } else {
+ opts[pp[0]] = pp[1]
+ }
+ }
+ return key, opts
+}
+
+func isArray(t reflect.Type) bool { return t.Kind() == reflect.Array || t.Kind() == reflect.Slice }
+func isBool(t reflect.Type) bool { return t.Kind() == reflect.Bool }
+func isDuration(t reflect.Type) bool { return t == reflect.TypeOf(time.Second) }
+func isMap(t reflect.Type) bool { return t.Kind() == reflect.Map }
+func isPtr(t reflect.Type) bool { return t.Kind() == reflect.Ptr }
+func isString(t reflect.Type) bool { return t.Kind() == reflect.String }
+func isStruct(t reflect.Type) bool { return t.Kind() == reflect.Struct }
+func isTime(t reflect.Type) bool { return t == reflect.TypeOf(time.Time{}) }
+func isFloat(t reflect.Type) bool {
+ return t.Kind() == reflect.Float32 || t.Kind() == reflect.Float64
+}
+func isInt(t reflect.Type) bool {
+ return t.Kind() == reflect.Int || t.Kind() == reflect.Int8 || t.Kind() == reflect.Int16 || t.Kind() == reflect.Int32 || t.Kind() == reflect.Int64
+}
+func isUint(t reflect.Type) bool {
+ return t.Kind() == reflect.Uint || t.Kind() == reflect.Uint8 || t.Kind() == reflect.Uint16 || t.Kind() == reflect.Uint32 || t.Kind() == reflect.Uint64
+}
diff --git a/vendor/github.com/magiconair/properties/doc.go b/vendor/github.com/magiconair/properties/doc.go
new file mode 100644
index 000000000..f8822da2b
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/doc.go
@@ -0,0 +1,156 @@
+// Copyright 2018 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package properties provides functions for reading and writing
+// ISO-8859-1 and UTF-8 encoded .properties files and has
+// support for recursive property expansion.
+//
+// Java properties files are ISO-8859-1 encoded and use Unicode
+// literals for characters outside the ISO character set. Unicode
+// literals can be used in UTF-8 encoded properties files but
+// aren't necessary.
+//
+// To load a single properties file use MustLoadFile():
+//
+// p := properties.MustLoadFile(filename, properties.UTF8)
+//
+// To load multiple properties files use MustLoadFiles()
+// which loads the files in the given order and merges the
+// result. Missing properties files can be ignored if the
+// 'ignoreMissing' flag is set to true.
+//
+// Filenames can contain environment variables which are expanded
+// before loading.
+//
+// f1 := "/etc/myapp/myapp.conf"
+// f2 := "/home/${USER}/myapp.conf"
+// p := MustLoadFiles([]string{f1, f2}, properties.UTF8, true)
+//
+// All of the different key/value delimiters ' ', ':' and '=' are
+// supported as well as the comment characters '!' and '#' and
+// multi-line values.
+//
+// ! this is a comment
+// # and so is this
+//
+// # the following expressions are equal
+// key value
+// key=value
+// key:value
+// key = value
+// key : value
+// key = val\
+// ue
+//
+// Properties stores all comments preceding a key and provides
+// GetComments() and SetComments() methods to retrieve and
+// update them. The convenience functions GetComment() and
+// SetComment() allow access to the last comment. The
+// WriteComment() method writes properties files including
+// the comments and with the keys in the original order.
+// This can be used for sanitizing properties files.
+//
+// Property expansion is recursive and circular references
+// and malformed expressions are not allowed and cause an
+// error. Expansion of environment variables is supported.
+//
+// # standard property
+// key = value
+//
+// # property expansion: key2 = value
+// key2 = ${key}
+//
+// # recursive expansion: key3 = value
+// key3 = ${key2}
+//
+// # circular reference (error)
+// key = ${key}
+//
+// # malformed expression (error)
+// key = ${ke
+//
+// # refers to the users' home dir
+// home = ${HOME}
+//
+// # local key takes precedence over env var: u = foo
+// USER = foo
+// u = ${USER}
+//
+// The default property expansion format is ${key} but can be
+// changed by setting different pre- and postfix values on the
+// Properties object.
+//
+// p := properties.NewProperties()
+// p.Prefix = "#["
+// p.Postfix = "]#"
+//
+// Properties provides convenience functions for getting typed
+// values with default values if the key does not exist or the
+// type conversion failed.
+//
+// # Returns true if the value is either "1", "on", "yes" or "true"
+// # Returns false for every other value and the default value if
+// # the key does not exist.
+// v = p.GetBool("key", false)
+//
+// # Returns the value if the key exists and the format conversion
+// # was successful. Otherwise, the default value is returned.
+// v = p.GetInt64("key", 999)
+// v = p.GetUint64("key", 999)
+// v = p.GetFloat64("key", 123.0)
+// v = p.GetString("key", "def")
+// v = p.GetDuration("key", 999)
+//
+// As an alternative properties may be applied with the standard
+// library's flag implementation at any time.
+//
+// # Standard configuration
+// v = flag.Int("key", 999, "help message")
+// flag.Parse()
+//
+// # Merge p into the flag set
+// p.MustFlag(flag.CommandLine)
+//
+// Properties provides several MustXXX() convenience functions
+// which will terminate the app if an error occurs. The behavior
+// of the failure is configurable and the default is to call
+// log.Fatal(err). To have the MustXXX() functions panic instead
+// of logging the error set a different ErrorHandler before
+// you use the Properties package.
+//
+// properties.ErrorHandler = properties.PanicHandler
+//
+// # Will panic instead of logging an error
+// p := properties.MustLoadFile("config.properties")
+//
+// You can also provide your own ErrorHandler function. The only requirement
+// is that the error handler function must exit after handling the error.
+//
+// properties.ErrorHandler = func(err error) {
+// fmt.Println(err)
+// os.Exit(1)
+// }
+//
+// # Will write to stdout and then exit
+// p := properties.MustLoadFile("config.properties")
+//
+// Properties can also be loaded into a struct via the `Decode`
+// method, e.g.
+//
+// type S struct {
+// A string `properties:"a,default=foo"`
+// D time.Duration `properties:"timeout,default=5s"`
+// E time.Time `properties:"expires,layout=2006-01-02,default=2015-01-01"`
+// }
+//
+// See `Decode()` method for the full documentation.
+//
+// The following documents provide a description of the properties
+// file format.
+//
+// http://en.wikipedia.org/wiki/.properties
+//
+// http://docs.oracle.com/javase/7/docs/api/java/util/Properties.html#load%28java.io.Reader%29
+//
+package properties
diff --git a/vendor/github.com/magiconair/properties/integrate.go b/vendor/github.com/magiconair/properties/integrate.go
new file mode 100644
index 000000000..74d38dc67
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/integrate.go
@@ -0,0 +1,34 @@
+// Copyright 2018 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import "flag"
+
+// MustFlag sets flags that are skipped by dst.Parse when p contains
+// the respective key for flag.Flag.Name.
+//
+// Its use is recommended with command line arguments as in:
+// flag.Parse()
+// p.MustFlag(flag.CommandLine)
+func (p *Properties) MustFlag(dst *flag.FlagSet) {
+ m := make(map[string]*flag.Flag)
+ dst.VisitAll(func(f *flag.Flag) {
+ m[f.Name] = f
+ })
+ dst.Visit(func(f *flag.Flag) {
+ delete(m, f.Name) // overridden
+ })
+
+ for name, f := range m {
+ v, ok := p.Get(name)
+ if !ok {
+ continue
+ }
+
+ if err := f.Value.Set(v); err != nil {
+ ErrorHandler(err)
+ }
+ }
+}
diff --git a/vendor/github.com/magiconair/properties/lex.go b/vendor/github.com/magiconair/properties/lex.go
new file mode 100644
index 000000000..367166d58
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/lex.go
@@ -0,0 +1,407 @@
+// Copyright 2018 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// Parts of the lexer are from the template/text/parser package
+// For these parts the following applies:
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file of the go 1.2
+// distribution.
+
+package properties
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// item represents a token or text string returned from the scanner.
+type item struct {
+ typ itemType // The type of this item.
+ pos int // The starting position, in bytes, of this item in the input string.
+ val string // The value of this item.
+}
+
+func (i item) String() string {
+ switch {
+ case i.typ == itemEOF:
+ return "EOF"
+ case i.typ == itemError:
+ return i.val
+ case len(i.val) > 10:
+ return fmt.Sprintf("%.10q...", i.val)
+ }
+ return fmt.Sprintf("%q", i.val)
+}
+
+// itemType identifies the type of lex items.
+type itemType int
+
+const (
+ itemError itemType = iota // error occurred; value is text of error
+ itemEOF
+ itemKey // a key
+ itemValue // a value
+ itemComment // a comment
+)
+
+// defines a constant for EOF
+const eof = -1
+
+// permitted whitespace characters space, FF and TAB
+const whitespace = " \f\t"
+
+// stateFn represents the state of the scanner as a function that returns the next state.
+type stateFn func(*lexer) stateFn
+
+// lexer holds the state of the scanner.
+type lexer struct {
+ input string // the string being scanned
+ state stateFn // the next lexing function to enter
+ pos int // current position in the input
+ start int // start position of this item
+ width int // width of last rune read from input
+ lastPos int // position of most recent item returned by nextItem
+ runes []rune // scanned runes for this item
+ items chan item // channel of scanned items
+}
+
+// next returns the next rune in the input.
+func (l *lexer) next() rune {
+ if l.pos >= len(l.input) {
+ l.width = 0
+ return eof
+ }
+ r, w := utf8.DecodeRuneInString(l.input[l.pos:])
+ l.width = w
+ l.pos += l.width
+ return r
+}
+
+// peek returns but does not consume the next rune in the input.
+func (l *lexer) peek() rune {
+ r := l.next()
+ l.backup()
+ return r
+}
+
+// backup steps back one rune. Can only be called once per call of next.
+func (l *lexer) backup() {
+ l.pos -= l.width
+}
+
+// emit passes an item back to the client.
+func (l *lexer) emit(t itemType) {
+ i := item{t, l.start, string(l.runes)}
+ l.items <- i
+ l.start = l.pos
+ l.runes = l.runes[:0]
+}
+
+// ignore skips over the pending input before this point.
+func (l *lexer) ignore() {
+ l.start = l.pos
+}
+
+// appends the rune to the current value
+func (l *lexer) appendRune(r rune) {
+ l.runes = append(l.runes, r)
+}
+
+// accept consumes the next rune if it's from the valid set.
+func (l *lexer) accept(valid string) bool {
+ if strings.ContainsRune(valid, l.next()) {
+ return true
+ }
+ l.backup()
+ return false
+}
+
+// acceptRun consumes a run of runes from the valid set.
+func (l *lexer) acceptRun(valid string) {
+ for strings.ContainsRune(valid, l.next()) {
+ }
+ l.backup()
+}
+
+// acceptRunUntil consumes a run of runes up to a terminator.
+func (l *lexer) acceptRunUntil(term rune) {
+ for term != l.next() {
+ }
+ l.backup()
+}
+
+// isNotEmpty reports whether the current parsed text is not empty.
+func (l *lexer) isNotEmpty() bool {
+ return l.pos > l.start
+}
+
+// lineNumber reports which line we're on, based on the position of
+// the previous item returned by nextItem. Doing it this way
+// means we don't have to worry about peek double counting.
+func (l *lexer) lineNumber() int {
+ return 1 + strings.Count(l.input[:l.lastPos], "\n")
+}
+
+// errorf returns an error token and terminates the scan by passing
+// back a nil pointer that will be the next state, terminating l.nextItem.
+func (l *lexer) errorf(format string, args ...interface{}) stateFn {
+ l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
+ return nil
+}
+
+// nextItem returns the next item from the input.
+func (l *lexer) nextItem() item {
+ i := <-l.items
+ l.lastPos = i.pos
+ return i
+}
+
+// lex creates a new scanner for the input string.
+func lex(input string) *lexer {
+ l := &lexer{
+ input: input,
+ items: make(chan item),
+ runes: make([]rune, 0, 32),
+ }
+ go l.run()
+ return l
+}
+
+// run runs the state machine for the lexer.
+func (l *lexer) run() {
+ for l.state = lexBeforeKey(l); l.state != nil; {
+ l.state = l.state(l)
+ }
+}
+
+// state functions
+
+// lexBeforeKey scans until a key begins.
+func lexBeforeKey(l *lexer) stateFn {
+ switch r := l.next(); {
+ case isEOF(r):
+ l.emit(itemEOF)
+ return nil
+
+ case isEOL(r):
+ l.ignore()
+ return lexBeforeKey
+
+ case isComment(r):
+ return lexComment
+
+ case isWhitespace(r):
+ l.ignore()
+ return lexBeforeKey
+
+ default:
+ l.backup()
+ return lexKey
+ }
+}
+
+// lexComment scans a comment line. The comment character has already been scanned.
+func lexComment(l *lexer) stateFn {
+ l.acceptRun(whitespace)
+ l.ignore()
+ for {
+ switch r := l.next(); {
+ case isEOF(r):
+ l.ignore()
+ l.emit(itemEOF)
+ return nil
+ case isEOL(r):
+ l.emit(itemComment)
+ return lexBeforeKey
+ default:
+ l.appendRune(r)
+ }
+ }
+}
+
+// lexKey scans the key up to a delimiter
+func lexKey(l *lexer) stateFn {
+ var r rune
+
+Loop:
+ for {
+ switch r = l.next(); {
+
+ case isEscape(r):
+ err := l.scanEscapeSequence()
+ if err != nil {
+ return l.errorf(err.Error())
+ }
+
+ case isEndOfKey(r):
+ l.backup()
+ break Loop
+
+ case isEOF(r):
+ break Loop
+
+ default:
+ l.appendRune(r)
+ }
+ }
+
+ if len(l.runes) > 0 {
+ l.emit(itemKey)
+ }
+
+ if isEOF(r) {
+ l.emit(itemEOF)
+ return nil
+ }
+
+ return lexBeforeValue
+}
+
+// lexBeforeValue scans the delimiter between key and value.
+// Leading and trailing whitespace is ignored.
+// We expect to be just after the key.
+func lexBeforeValue(l *lexer) stateFn {
+ l.acceptRun(whitespace)
+ l.accept(":=")
+ l.acceptRun(whitespace)
+ l.ignore()
+ return lexValue
+}
+
+// lexValue scans text until the end of the line. We expect to be just after the delimiter.
+func lexValue(l *lexer) stateFn {
+ for {
+ switch r := l.next(); {
+ case isEscape(r):
+ if isEOL(l.peek()) {
+ l.next()
+ l.acceptRun(whitespace)
+ } else {
+ err := l.scanEscapeSequence()
+ if err != nil {
+ return l.errorf(err.Error())
+ }
+ }
+
+ case isEOL(r):
+ l.emit(itemValue)
+ l.ignore()
+ return lexBeforeKey
+
+ case isEOF(r):
+ l.emit(itemValue)
+ l.emit(itemEOF)
+ return nil
+
+ default:
+ l.appendRune(r)
+ }
+ }
+}
+
+// scanEscapeSequence scans either one of the escaped characters
+// or a unicode literal. We expect to be after the escape character.
+func (l *lexer) scanEscapeSequence() error {
+ switch r := l.next(); {
+
+ case isEscapedCharacter(r):
+ l.appendRune(decodeEscapedCharacter(r))
+ return nil
+
+ case atUnicodeLiteral(r):
+ return l.scanUnicodeLiteral()
+
+ case isEOF(r):
+ return fmt.Errorf("premature EOF")
+
+ // silently drop the escape character and append the rune as is
+ default:
+ l.appendRune(r)
+ return nil
+ }
+}
+
+// scans a unicode literal in the form \uXXXX. We expect to be after the \u.
+func (l *lexer) scanUnicodeLiteral() error {
+ // scan the digits
+ d := make([]rune, 4)
+ for i := 0; i < 4; i++ {
+ d[i] = l.next()
+ if d[i] == eof || !strings.ContainsRune("0123456789abcdefABCDEF", d[i]) {
+ return fmt.Errorf("invalid unicode literal")
+ }
+ }
+
+ // decode the digits into a rune
+ r, err := strconv.ParseInt(string(d), 16, 0)
+ if err != nil {
+ return err
+ }
+
+ l.appendRune(rune(r))
+ return nil
+}
+
+// decodeEscapedCharacter returns the unescaped rune. We expect to be after the escape character.
+func decodeEscapedCharacter(r rune) rune {
+ switch r {
+ case 'f':
+ return '\f'
+ case 'n':
+ return '\n'
+ case 'r':
+ return '\r'
+ case 't':
+ return '\t'
+ default:
+ return r
+ }
+}
+
+// atUnicodeLiteral reports whether we are at a unicode literal.
+// The escape character has already been consumed.
+func atUnicodeLiteral(r rune) bool {
+ return r == 'u'
+}
+
+// isComment reports whether we are at the start of a comment.
+func isComment(r rune) bool {
+ return r == '#' || r == '!'
+}
+
+// isEndOfKey reports whether the rune terminates the current key.
+func isEndOfKey(r rune) bool {
+ return strings.ContainsRune(" \f\t\r\n:=", r)
+}
+
+// isEOF reports whether we are at EOF.
+func isEOF(r rune) bool {
+ return r == eof
+}
+
+// isEOL reports whether we are at a new line character.
+func isEOL(r rune) bool {
+ return r == '\n' || r == '\r'
+}
+
+// isEscape reports whether the rune is the escape character which
+// prefixes unicode literals and other escaped characters.
+func isEscape(r rune) bool {
+ return r == '\\'
+}
+
+// isEscapedCharacter reports whether we are at one of the characters that need escaping.
+// The escape character has already been consumed.
+func isEscapedCharacter(r rune) bool {
+ return strings.ContainsRune(" :=fnrt", r)
+}
+
+// isWhitespace reports whether the rune is a whitespace character.
+func isWhitespace(r rune) bool {
+ return strings.ContainsRune(whitespace, r)
+}
diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go
new file mode 100644
index 000000000..c83c2dadd
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/load.go
@@ -0,0 +1,293 @@
+// Copyright 2018 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "strings"
+)
+
+// Encoding specifies encoding of the input data.
+type Encoding uint
+
+const (
+ // utf8Default is a private placeholder for the zero value of Encoding to
+ // ensure that it has the correct meaning. UTF8 is the default encoding but
+ // was assigned a non-zero value which cannot be changed without breaking
+ // existing code. Clients should continue to use the public constants.
+ utf8Default Encoding = iota
+
+ // UTF8 interprets the input data as UTF-8.
+ UTF8
+
+ // ISO_8859_1 interprets the input data as ISO-8859-1.
+ ISO_8859_1
+)
+
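+// A Loader reads properties from byte buffers, files or URLs. Its fields
+// control the input encoding, property expansion and the handling of
+// missing sources.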
+type Loader struct {
+ // Encoding determines how the data from files and byte buffers
+ // is interpreted. For URLs the Content-Type header is used
+ // to determine the encoding of the data.
+ Encoding Encoding
+
+ // DisableExpansion configures the property expansion of the
+ // returned property object. When set to true, the property values
+ // will not be expanded and the Property object will not be checked
+ // for invalid expansion expressions.
+ DisableExpansion bool
+
+ // IgnoreMissing configures whether missing files or URLs which return
+ // 404 are reported as errors. When set to true, missing files and 404
+ // status codes are not reported as errors.
+ IgnoreMissing bool
+}
+
+// LoadBytes reads a buffer into a Properties struct.
+func (l *Loader) LoadBytes(buf []byte) (*Properties, error) {
+ return l.loadBytes(buf, l.Encoding)
+}
+
+// LoadAll reads the content of multiple URLs or files in the given order into
+// a Properties struct. If IgnoreMissing is true then a 404 status code or
+// missing file will not be reported as error. Encoding sets the encoding for
+// files. For the URLs see LoadURL for the Content-Type header and the
+// encoding.
+func (l *Loader) LoadAll(names []string) (*Properties, error) {
+ all := NewProperties()
+ for _, name := range names {
+ n, err := expandName(name)
+ if err != nil {
+ return nil, err
+ }
+
+ var p *Properties
+ switch {
+ case strings.HasPrefix(n, "http://"):
+ p, err = l.LoadURL(n)
+ case strings.HasPrefix(n, "https://"):
+ p, err = l.LoadURL(n)
+ default:
+ p, err = l.LoadFile(n)
+ }
+ if err != nil {
+ return nil, err
+ }
+ all.Merge(p)
+ }
+
+ all.DisableExpansion = l.DisableExpansion
+ if all.DisableExpansion {
+ return all, nil
+ }
+ return all, all.check()
+}
+
+// LoadFile reads a file into a Properties struct.
+// If IgnoreMissing is true then a missing file will not be
+// reported as error.
+func (l *Loader) LoadFile(filename string) (*Properties, error) {
+ data, err := ioutil.ReadFile(filename)
+ if err != nil {
+ if l.IgnoreMissing && os.IsNotExist(err) {
+ LogPrintf("properties: %s not found. skipping", filename)
+ return NewProperties(), nil
+ }
+ return nil, err
+ }
+ return l.loadBytes(data, l.Encoding)
+}
+
+// LoadURL reads the content of the URL into a Properties struct.
+//
+// The encoding is determined via the Content-Type header which
+// should be set to 'text/plain'. If the 'charset' parameter is
+// missing, 'iso-8859-1' or 'latin1' the encoding is set to
+// ISO-8859-1. If the 'charset' parameter is set to 'utf-8' the
+// encoding is set to UTF-8. A missing content type header is
+// interpreted as 'text/plain; charset=utf-8'.
+func (l *Loader) LoadURL(url string) (*Properties, error) {
+ resp, err := http.Get(url)
+ if err != nil {
+ return nil, fmt.Errorf("properties: error fetching %q. %s", url, err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode == 404 && l.IgnoreMissing {
+ LogPrintf("properties: %s returned %d. skipping", url, resp.StatusCode)
+ return NewProperties(), nil
+ }
+
+ if resp.StatusCode != 200 {
+ return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode)
+ }
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("properties: %s error reading response. %s", url, err)
+ }
+
+ ct := resp.Header.Get("Content-Type")
+ ct = strings.Join(strings.Fields(ct), "")
+ var enc Encoding
+ switch strings.ToLower(ct) {
+ case "text/plain", "text/plain;charset=iso-8859-1", "text/plain;charset=latin1":
+ enc = ISO_8859_1
+ case "", "text/plain;charset=utf-8":
+ enc = UTF8
+ default:
+ return nil, fmt.Errorf("properties: invalid content type %s", ct)
+ }
+
+ return l.loadBytes(body, enc)
+}
+
+func (l *Loader) loadBytes(buf []byte, enc Encoding) (*Properties, error) {
+ p, err := parse(convert(buf, enc))
+ if err != nil {
+ return nil, err
+ }
+ p.DisableExpansion = l.DisableExpansion
+ if p.DisableExpansion {
+ return p, nil
+ }
+ return p, p.check()
+}
+
+// Load reads a buffer into a Properties struct.
+func Load(buf []byte, enc Encoding) (*Properties, error) {
+ l := &Loader{Encoding: enc}
+ return l.LoadBytes(buf)
+}
+
+// LoadString reads a UTF8 string into a Properties struct.
+func LoadString(s string) (*Properties, error) {
+ l := &Loader{Encoding: UTF8}
+ return l.LoadBytes([]byte(s))
+}
+
+// LoadMap creates a new Properties struct from a string map.
+func LoadMap(m map[string]string) *Properties {
+ p := NewProperties()
+ for k, v := range m {
+ p.Set(k, v)
+ }
+ return p
+}
+
+// LoadFile reads a file into a Properties struct.
+func LoadFile(filename string, enc Encoding) (*Properties, error) {
+ l := &Loader{Encoding: enc}
+ return l.LoadAll([]string{filename})
+}
+
+// LoadFiles reads multiple files in the given order into
+// a Properties struct. If 'ignoreMissing' is true then
+// non-existent files will not be reported as error.
+func LoadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) {
+ l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing}
+ return l.LoadAll(filenames)
+}
+
+// LoadURL reads the content of the URL into a Properties struct.
+// See Loader#LoadURL for details.
+func LoadURL(url string) (*Properties, error) {
+ l := &Loader{Encoding: UTF8}
+ return l.LoadAll([]string{url})
+}
+
+// LoadURLs reads the content of multiple URLs in the given order into a
+// Properties struct. If IgnoreMissing is true then a 404 status code will
+// not be reported as error. See Loader#LoadURL for the Content-Type header
+// and the encoding.
+func LoadURLs(urls []string, ignoreMissing bool) (*Properties, error) {
+ l := &Loader{Encoding: UTF8, IgnoreMissing: ignoreMissing}
+ return l.LoadAll(urls)
+}
+
+// LoadAll reads the content of multiple URLs or files in the given order into a
+// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will
+// not be reported as error. Encoding sets the encoding for files. For the URLs please see
+// LoadURL for the Content-Type header and the encoding.
+func LoadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) {
+ l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing}
+ return l.LoadAll(names)
+}
+
+// MustLoadString reads a UTF8 string into a Properties struct and
+// panics on error.
+func MustLoadString(s string) *Properties {
+ return must(LoadString(s))
+}
+
+// MustLoadFile reads a file into a Properties struct and
+// panics on error.
+func MustLoadFile(filename string, enc Encoding) *Properties {
+ return must(LoadFile(filename, enc))
+}
+
+// MustLoadFiles reads multiple files in the given order into
+// a Properties struct and panics on error. If 'ignoreMissing'
+// is true then non-existent files will not be reported as error.
+func MustLoadFiles(filenames []string, enc Encoding, ignoreMissing bool) *Properties {
+ return must(LoadFiles(filenames, enc, ignoreMissing))
+}
+
+// MustLoadURL reads the content of a URL into a Properties struct and
+// panics on error.
+func MustLoadURL(url string) *Properties {
+ return must(LoadURL(url))
+}
+
+// MustLoadURLs reads the content of multiple URLs in the given order into a
+// Properties struct and panics on error. If 'ignoreMissing' is true then a 404
+// status code will not be reported as error.
+func MustLoadURLs(urls []string, ignoreMissing bool) *Properties {
+ return must(LoadURLs(urls, ignoreMissing))
+}
+
+// MustLoadAll reads the content of multiple URLs or files in the given order into a
+// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will
+// not be reported as error. Encoding sets the encoding for files. For the URLs please see
+// LoadURL for the Content-Type header and the encoding. It panics on error.
+func MustLoadAll(names []string, enc Encoding, ignoreMissing bool) *Properties {
+ return must(LoadAll(names, enc, ignoreMissing))
+}
+
+func must(p *Properties, err error) *Properties {
+ if err != nil {
+ ErrorHandler(err)
+ }
+ return p
+}
+
+// expandName expands ${ENV_VAR} expressions in a name.
+// If the environment variable does not exist then it will be replaced
+// with an empty string. Malformed expressions like "${ENV_VAR" will
+// be reported as error.
+func expandName(name string) (string, error) {
+ return expand(name, []string{}, "${", "}", make(map[string]string))
+}
+
+// Interprets a byte buffer either as an ISO-8859-1 or UTF-8 encoded string.
+// For ISO-8859-1 we can convert each byte straight into a rune since the
+// first 256 unicode code points cover ISO-8859-1.
+func convert(buf []byte, enc Encoding) string {
+ switch enc {
+ case utf8Default, UTF8:
+ return string(buf)
+ case ISO_8859_1:
+ runes := make([]rune, len(buf))
+ for i, b := range buf {
+ runes[i] = rune(b)
+ }
+ return string(runes)
+ default:
+ ErrorHandler(fmt.Errorf("unsupported encoding %v", enc))
+ }
+ panic("ErrorHandler should exit")
+}
diff --git a/vendor/github.com/magiconair/properties/parser.go b/vendor/github.com/magiconair/properties/parser.go
new file mode 100644
index 000000000..cdc4a8034
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/parser.go
@@ -0,0 +1,95 @@
+// Copyright 2018 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+ "fmt"
+ "runtime"
+)
+
+type parser struct {
+ lex *lexer
+}
+
+func parse(input string) (properties *Properties, err error) {
+ p := &parser{lex: lex(input)}
+ defer p.recover(&err)
+
+ properties = NewProperties()
+ key := ""
+ comments := []string{}
+
+ for {
+ token := p.expectOneOf(itemComment, itemKey, itemEOF)
+ switch token.typ {
+ case itemEOF:
+ goto done
+ case itemComment:
+ comments = append(comments, token.val)
+ continue
+ case itemKey:
+ key = token.val
+ if _, ok := properties.m[key]; !ok {
+ properties.k = append(properties.k, key)
+ }
+ }
+
+ token = p.expectOneOf(itemValue, itemEOF)
+ if len(comments) > 0 {
+ properties.c[key] = comments
+ comments = []string{}
+ }
+ switch token.typ {
+ case itemEOF:
+ properties.m[key] = ""
+ goto done
+ case itemValue:
+ properties.m[key] = token.val
+ }
+ }
+
+done:
+ return properties, nil
+}
+
+func (p *parser) errorf(format string, args ...interface{}) {
+ format = fmt.Sprintf("properties: Line %d: %s", p.lex.lineNumber(), format)
+ panic(fmt.Errorf(format, args...))
+}
+
+func (p *parser) expect(expected itemType) (token item) {
+ token = p.lex.nextItem()
+ if token.typ != expected {
+ p.unexpected(token)
+ }
+ return token
+}
+
+func (p *parser) expectOneOf(expected ...itemType) (token item) {
+ token = p.lex.nextItem()
+ for _, v := range expected {
+ if token.typ == v {
+ return token
+ }
+ }
+ p.unexpected(token)
+ panic("unexpected token")
+}
+
+func (p *parser) unexpected(token item) {
+ p.errorf(token.String())
+}
+
+// recover is the handler that turns panics into returns from the top level of Parse.
+func (p *parser) recover(errp *error) {
+ e := recover()
+ if e != nil {
+ if _, ok := e.(runtime.Error); ok {
+ panic(e)
+ }
+ *errp = e.(error)
+ }
+ return
+}
diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go
new file mode 100644
index 000000000..1529e7223
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/properties.go
@@ -0,0 +1,854 @@
+// Copyright 2018 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+// BUG(frank): Set() does not check for invalid unicode literals since this is currently handled by the lexer.
+// BUG(frank): Write() does not allow configuring the newline character. Therefore, on Windows LF is used.
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+const maxExpansionDepth = 64
+
+// ErrorHandlerFunc defines the type of function which handles failures
+// of the MustXXX() functions. An error handler function must exit
+// the application after handling the error.
+type ErrorHandlerFunc func(error)
+
+// ErrorHandler is the function which handles failures of the MustXXX()
+// functions. The default is LogFatalHandler.
+var ErrorHandler ErrorHandlerFunc = LogFatalHandler
+
+// LogHandlerFunc defines the function prototype for logging errors.
+type LogHandlerFunc func(fmt string, args ...interface{})
+
+// LogPrintf defines a log handler which uses log.Printf.
+var LogPrintf LogHandlerFunc = log.Printf
+
+// LogFatalHandler handles the error by logging a fatal error and exiting.
+func LogFatalHandler(err error) {
+ log.Fatal(err)
+}
+
+// PanicHandler handles the error by panicking.
+func PanicHandler(err error) {
+ panic(err)
+}
+
+// -----------------------------------------------------------------------------
+
+// A Properties contains the key/value pairs from the properties input.
+// All values are stored in unexpanded form and are expanded at runtime.
+type Properties struct {
+ // Pre-/Postfix for property expansion.
+ Prefix string
+ Postfix string
+
+ // DisableExpansion controls the expansion of properties on Get()
+ // and the check for circular references on Set(). When set to
+ // true Properties behaves like a simple key/value store and does
+ // not check for circular references on Get() or on Set().
+ DisableExpansion bool
+
+ // Stores the key/value pairs
+ m map[string]string
+
+ // Stores the comments per key.
+ c map[string][]string
+
+ // Stores the keys in order of appearance.
+ k []string
+
+ // WriteSeparator specifies the separator of key and value while writing the properties.
+ WriteSeparator string
+}
+
+// NewProperties creates a new Properties struct with the default
+// configuration for "${key}" expressions.
+func NewProperties() *Properties {
+ return &Properties{
+ Prefix: "${",
+ Postfix: "}",
+ m: map[string]string{},
+ c: map[string][]string{},
+ k: []string{},
+ }
+}
+
+// Load reads a buffer into the given Properties struct.
+func (p *Properties) Load(buf []byte, enc Encoding) error {
+ l := &Loader{Encoding: enc, DisableExpansion: p.DisableExpansion}
+ newProperties, err := l.LoadBytes(buf)
+ if err != nil {
+ return err
+ }
+ p.Merge(newProperties)
+ return nil
+}
+
+// Get returns the expanded value for the given key if it exists.
+// Otherwise, ok is false.
+func (p *Properties) Get(key string) (value string, ok bool) {
+ v, ok := p.m[key]
+ if p.DisableExpansion {
+ return v, ok
+ }
+ if !ok {
+ return "", false
+ }
+
+ expanded, err := p.expand(key, v)
+
+ // we guarantee that the expanded value is free of
+ // circular references and malformed expressions
+ // so we panic if we still get an error here.
+ if err != nil {
+ ErrorHandler(err)
+ }
+
+ return expanded, true
+}
+
+// MustGet returns the expanded value for the given key if it exists.
+// Otherwise, it panics.
+func (p *Properties) MustGet(key string) string {
+ if v, ok := p.Get(key); ok {
+ return v
+ }
+ ErrorHandler(invalidKeyError(key))
+ panic("ErrorHandler should exit")
+}
+
+// ----------------------------------------------------------------------------
+
+// ClearComments removes the comments for all keys.
+func (p *Properties) ClearComments() {
+ p.c = map[string][]string{}
+}
+
+// ----------------------------------------------------------------------------
+
+// GetComment returns the last comment before the given key or an empty string.
+func (p *Properties) GetComment(key string) string {
+ comments, ok := p.c[key]
+ if !ok || len(comments) == 0 {
+ return ""
+ }
+ return comments[len(comments)-1]
+}
+
+// ----------------------------------------------------------------------------
+
+// GetComments returns all comments that appeared before the given key or nil.
+func (p *Properties) GetComments(key string) []string {
+ if comments, ok := p.c[key]; ok {
+ return comments
+ }
+ return nil
+}
+
+// ----------------------------------------------------------------------------
+
+// SetComment sets the comment for the key.
+func (p *Properties) SetComment(key, comment string) {
+ p.c[key] = []string{comment}
+}
+
+// ----------------------------------------------------------------------------
+
+// SetComments sets the comments for the key. If the comments are nil then
+// all comments for this key are deleted.
+func (p *Properties) SetComments(key string, comments []string) {
+ if comments == nil {
+ delete(p.c, key)
+ return
+ }
+ p.c[key] = comments
+}
+
+// ----------------------------------------------------------------------------
+
+// GetBool checks if the expanded value is one of '1', 'yes',
+// 'true' or 'on' if the key exists. The comparison is case-insensitive.
+// If the key does not exist the default value is returned.
+func (p *Properties) GetBool(key string, def bool) bool {
+ v, err := p.getBool(key)
+ if err != nil {
+ return def
+ }
+ return v
+}
+
+// MustGetBool checks if the expanded value is one of '1', 'yes',
+// 'true' or 'on' if the key exists. The comparison is case-insensitive.
+// If the key does not exist the function panics.
+func (p *Properties) MustGetBool(key string) bool {
+ v, err := p.getBool(key)
+ if err != nil {
+ ErrorHandler(err)
+ }
+ return v
+}
+
+func (p *Properties) getBool(key string) (value bool, err error) {
+ if v, ok := p.Get(key); ok {
+ return boolVal(v), nil
+ }
+ return false, invalidKeyError(key)
+}
+
+func boolVal(v string) bool {
+ v = strings.ToLower(v)
+ return v == "1" || v == "true" || v == "yes" || v == "on"
+}
+
+// ----------------------------------------------------------------------------
+
+// GetDuration parses the expanded value as a time.Duration (in ns) if the
+// key exists. If key does not exist or the value cannot be parsed the default
+// value is returned. In almost all cases you want to use GetParsedDuration().
+func (p *Properties) GetDuration(key string, def time.Duration) time.Duration {
+ v, err := p.getInt64(key)
+ if err != nil {
+ return def
+ }
+ return time.Duration(v)
+}
+
+// MustGetDuration parses the expanded value as a time.Duration (in ns) if
+// the key exists. If key does not exist or the value cannot be parsed the
+// function panics. In almost all cases you want to use MustGetParsedDuration().
+func (p *Properties) MustGetDuration(key string) time.Duration {
+ v, err := p.getInt64(key)
+ if err != nil {
+ ErrorHandler(err)
+ }
+ return time.Duration(v)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetParsedDuration parses the expanded value with time.ParseDuration() if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned.
+func (p *Properties) GetParsedDuration(key string, def time.Duration) time.Duration {
+ s, ok := p.Get(key)
+ if !ok {
+ return def
+ }
+ v, err := time.ParseDuration(s)
+ if err != nil {
+ return def
+ }
+ return v
+}
+
+// MustGetParsedDuration parses the expanded value with time.ParseDuration() if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetParsedDuration(key string) time.Duration {
+ s, ok := p.Get(key)
+ if !ok {
+ ErrorHandler(invalidKeyError(key))
+ }
+ v, err := time.ParseDuration(s)
+ if err != nil {
+ ErrorHandler(err)
+ }
+ return v
+}
+
+// ----------------------------------------------------------------------------
+
+// GetFloat64 parses the expanded value as a float64 if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned.
+func (p *Properties) GetFloat64(key string, def float64) float64 {
+ v, err := p.getFloat64(key)
+ if err != nil {
+ return def
+ }
+ return v
+}
+
+// MustGetFloat64 parses the expanded value as a float64 if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetFloat64(key string) float64 {
+ v, err := p.getFloat64(key)
+ if err != nil {
+ ErrorHandler(err)
+ }
+ return v
+}
+
+func (p *Properties) getFloat64(key string) (value float64, err error) {
+ if v, ok := p.Get(key); ok {
+ value, err = strconv.ParseFloat(v, 64)
+ if err != nil {
+ return 0, err
+ }
+ return value, nil
+ }
+ return 0, invalidKeyError(key)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetInt parses the expanded value as an int if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned. If the value does not fit into an int the
+// function panics with an out of range error.
+func (p *Properties) GetInt(key string, def int) int {
+ v, err := p.getInt64(key)
+ if err != nil {
+ return def
+ }
+ return intRangeCheck(key, v)
+}
+
+// MustGetInt parses the expanded value as an int if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+// If the value does not fit into an int the function panics with
+// an out of range error.
+func (p *Properties) MustGetInt(key string) int {
+ v, err := p.getInt64(key)
+ if err != nil {
+ ErrorHandler(err)
+ }
+ return intRangeCheck(key, v)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetInt64 parses the expanded value as an int64 if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned.
+func (p *Properties) GetInt64(key string, def int64) int64 {
+ v, err := p.getInt64(key)
+ if err != nil {
+ return def
+ }
+ return v
+}
+
+// MustGetInt64 parses the expanded value as an int64 if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetInt64(key string) int64 {
+ v, err := p.getInt64(key)
+ if err != nil {
+ ErrorHandler(err)
+ }
+ return v
+}
+
+func (p *Properties) getInt64(key string) (value int64, err error) {
+ if v, ok := p.Get(key); ok {
+ value, err = strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ return value, nil
+ }
+ return 0, invalidKeyError(key)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetUint parses the expanded value as a uint if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned. If the value does not fit into a uint the
+// function panics with an out of range error.
+func (p *Properties) GetUint(key string, def uint) uint {
+ v, err := p.getUint64(key)
+ if err != nil {
+ return def
+ }
+ return uintRangeCheck(key, v)
+}
+
+// MustGetUint parses the expanded value as a uint if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+// If the value does not fit into a uint the function panics with
+// an out of range error.
+func (p *Properties) MustGetUint(key string) uint {
+ v, err := p.getUint64(key)
+ if err != nil {
+ ErrorHandler(err)
+ }
+ return uintRangeCheck(key, v)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetUint64 parses the expanded value as a uint64 if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned.
+func (p *Properties) GetUint64(key string, def uint64) uint64 {
+ v, err := p.getUint64(key)
+ if err != nil {
+ return def
+ }
+ return v
+}
+
+// MustGetUint64 parses the expanded value as a uint64 if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetUint64(key string) uint64 {
+ v, err := p.getUint64(key)
+ if err != nil {
+ ErrorHandler(err)
+ }
+ return v
+}
+
+func (p *Properties) getUint64(key string) (value uint64, err error) {
+ if v, ok := p.Get(key); ok {
+ value, err = strconv.ParseUint(v, 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ return value, nil
+ }
+ return 0, invalidKeyError(key)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetString returns the expanded value for the given key if it exists or
+// the default value otherwise.
+func (p *Properties) GetString(key, def string) string {
+ if v, ok := p.Get(key); ok {
+ return v
+ }
+ return def
+}
+
+// MustGetString returns the expanded value for the given key if it exists or
+// panics otherwise.
+func (p *Properties) MustGetString(key string) string {
+ if v, ok := p.Get(key); ok {
+ return v
+ }
+ ErrorHandler(invalidKeyError(key))
+ panic("ErrorHandler should exit")
+}
+
+// ----------------------------------------------------------------------------
+
+// Filter returns a new properties object which contains all properties
+// for which the key matches the pattern.
+func (p *Properties) Filter(pattern string) (*Properties, error) {
+ re, err := regexp.Compile(pattern)
+ if err != nil {
+ return nil, err
+ }
+
+ return p.FilterRegexp(re), nil
+}
+
+// FilterRegexp returns a new properties object which contains all properties
+// for which the key matches the regular expression.
+func (p *Properties) FilterRegexp(re *regexp.Regexp) *Properties {
+ pp := NewProperties()
+ for _, k := range p.k {
+ if re.MatchString(k) {
+ // TODO(fs): we are ignoring the error which flags a circular reference.
+ // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed)
+ pp.Set(k, p.m[k])
+ }
+ }
+ return pp
+}
+
+// FilterPrefix returns a new properties object with a subset of all keys
+// with the given prefix.
+func (p *Properties) FilterPrefix(prefix string) *Properties {
+ pp := NewProperties()
+ for _, k := range p.k {
+ if strings.HasPrefix(k, prefix) {
+ // TODO(fs): we are ignoring the error which flags a circular reference.
+ // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed)
+ pp.Set(k, p.m[k])
+ }
+ }
+ return pp
+}
+
+// FilterStripPrefix returns a new properties object with a subset of all keys
+// with the given prefix and the prefix removed from the keys.
+func (p *Properties) FilterStripPrefix(prefix string) *Properties {
+ pp := NewProperties()
+ n := len(prefix)
+ for _, k := range p.k {
+ if len(k) > len(prefix) && strings.HasPrefix(k, prefix) {
+ // TODO(fs): we are ignoring the error which flags a circular reference.
+ // TODO(fs): since we are modifying keys I am not entirely sure whether we can create a circular reference
+ // TODO(fs): this function should probably return an error but the signature is fixed
+ pp.Set(k[n:], p.m[k])
+ }
+ }
+ return pp
+}
+
+// Len returns the number of keys.
+func (p *Properties) Len() int {
+ return len(p.m)
+}
+
+// Keys returns all keys in the same order as in the input.
+func (p *Properties) Keys() []string {
+ keys := make([]string, len(p.k))
+ copy(keys, p.k)
+ return keys
+}
+
+// Set sets the property key to the corresponding value.
+// If a value for key existed before then ok is true and prev
+// contains the previous value. If the value contains a
+// circular reference or a malformed expression then
+// an error is returned.
+// An empty key is silently ignored.
+func (p *Properties) Set(key, value string) (prev string, ok bool, err error) {
+ if key == "" {
+ return "", false, nil
+ }
+
+ // if expansion is disabled we allow circular references
+ if p.DisableExpansion {
+ prev, ok = p.Get(key)
+ p.m[key] = value
+ if !ok {
+ p.k = append(p.k, key)
+ }
+ return prev, ok, nil
+ }
+
+ // to check for a circular reference we temporarily need
+ // to set the new value. If there is an error then revert
+ // to the previous state. Only if all tests are successful
+ // then we add the key to the p.k list.
+ prev, ok = p.Get(key)
+ p.m[key] = value
+
+ // now check for a circular reference
+ _, err = p.expand(key, value)
+ if err != nil {
+
+ // revert to the previous state
+ if ok {
+ p.m[key] = prev
+ } else {
+ delete(p.m, key)
+ }
+
+ return "", false, err
+ }
+
+ if !ok {
+ p.k = append(p.k, key)
+ }
+
+ return prev, ok, nil
+}
+
+// SetValue sets property key to the default string value
+// as defined by fmt.Sprintf("%v").
+func (p *Properties) SetValue(key string, value interface{}) error {
+ _, _, err := p.Set(key, fmt.Sprintf("%v", value))
+ return err
+}
+
+// MustSet sets the property key to the corresponding value.
+// If a value for key existed before then ok is true and prev
+// contains the previous value. An empty key is silently ignored.
+func (p *Properties) MustSet(key, value string) (prev string, ok bool) {
+ prev, ok, err := p.Set(key, value)
+ if err != nil {
+ ErrorHandler(err)
+ }
+ return prev, ok
+}
+
+// String returns a string of all expanded 'key = value' pairs.
+func (p *Properties) String() string {
+ var s string
+ for _, key := range p.k {
+ value, _ := p.Get(key)
+ s = fmt.Sprintf("%s%s = %s\n", s, key, value)
+ }
+ return s
+}
+
+// Sort sorts the properties keys in alphabetical order.
+// This is helpful before writing the properties.
+func (p *Properties) Sort() {
+ sort.Strings(p.k)
+}
+
+// Write writes all unexpanded 'key = value' pairs to the given writer.
+// Write returns the number of bytes written and any write error encountered.
+func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) {
+ return p.WriteComment(w, "", enc)
+}
+
+// WriteComment writes all unexpanded 'key = value' pairs to the given writer.
+// If prefix is not empty then comments are written with a blank line and the
+// given prefix. The prefix should be either "# " or "! " to be compatible with
+// the properties file format. Otherwise, the properties parser will not be
+// able to read the file back in. It returns the number of bytes written and
+// any write error encountered.
+func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n int, err error) {
+ var x int
+
+ for _, key := range p.k {
+ value := p.m[key]
+
+ if prefix != "" {
+ if comments, ok := p.c[key]; ok {
+ // don't print comments if they are all empty
+ allEmpty := true
+ for _, c := range comments {
+ if c != "" {
+ allEmpty = false
+ break
+ }
+ }
+
+ if !allEmpty {
+ // add a blank line between entries but not at the top
+ if len(comments) > 0 && n > 0 {
+ x, err = fmt.Fprintln(w)
+ if err != nil {
+ return
+ }
+ n += x
+ }
+
+ for _, c := range comments {
+ x, err = fmt.Fprintf(w, "%s%s\n", prefix, c)
+ if err != nil {
+ return
+ }
+ n += x
+ }
+ }
+ }
+ }
+ sep := " = "
+ if p.WriteSeparator != "" {
+ sep = p.WriteSeparator
+ }
+ x, err = fmt.Fprintf(w, "%s%s%s\n", encode(key, " :", enc), sep, encode(value, "", enc))
+ if err != nil {
+ return
+ }
+ n += x
+ }
+ return
+}
+
+// Map returns a copy of the properties as a map.
+func (p *Properties) Map() map[string]string {
+ m := make(map[string]string)
+ for k, v := range p.m {
+ m[k] = v
+ }
+ return m
+}
+
+// FilterFunc returns a copy of the properties which includes the values which passed all filters.
+func (p *Properties) FilterFunc(filters ...func(k, v string) bool) *Properties {
+ pp := NewProperties()
+outer:
+ for k, v := range p.m {
+ for _, f := range filters {
+ if !f(k, v) {
+ continue outer
+ }
+ pp.Set(k, v)
+ }
+ }
+ return pp
+}
+
+// ----------------------------------------------------------------------------
+
+// Delete removes the key and its comments.
+func (p *Properties) Delete(key string) {
+ delete(p.m, key)
+ delete(p.c, key)
+ newKeys := []string{}
+ for _, k := range p.k {
+ if k != key {
+ newKeys = append(newKeys, k)
+ }
+ }
+ p.k = newKeys
+}
+
+// Merge merges properties, comments and keys from other *Properties into p
+func (p *Properties) Merge(other *Properties) {
+ for k, v := range other.m {
+ p.m[k] = v
+ }
+ for k, v := range other.c {
+ p.c[k] = v
+ }
+
+outer:
+ for _, otherKey := range other.k {
+ for _, key := range p.k {
+ if otherKey == key {
+ continue outer
+ }
+ }
+ p.k = append(p.k, otherKey)
+ }
+}
+
+// ----------------------------------------------------------------------------
+
+// check expands all values and returns an error if a circular reference or
+// a malformed expression was found.
+func (p *Properties) check() error {
+ for key, value := range p.m {
+ if _, err := p.expand(key, value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (p *Properties) expand(key, input string) (string, error) {
+ // no pre/postfix -> nothing to expand
+ if p.Prefix == "" && p.Postfix == "" {
+ return input, nil
+ }
+
+ return expand(input, []string{key}, p.Prefix, p.Postfix, p.m)
+}
+
+// expand recursively expands expressions of '(prefix)key(postfix)' to their corresponding values.
+// The function keeps track of the keys that were already expanded and stops if it
+// detects a circular reference or a malformed expression of the form '(prefix)key'.
+func expand(s string, keys []string, prefix, postfix string, values map[string]string) (string, error) {
+ if len(keys) > maxExpansionDepth {
+ return "", fmt.Errorf("expansion too deep")
+ }
+
+ for {
+ start := strings.Index(s, prefix)
+ if start == -1 {
+ return s, nil
+ }
+
+ keyStart := start + len(prefix)
+ keyLen := strings.Index(s[keyStart:], postfix)
+ if keyLen == -1 {
+ return "", fmt.Errorf("malformed expression")
+ }
+
+ end := keyStart + keyLen + len(postfix) - 1
+ key := s[keyStart : keyStart+keyLen]
+
+ // fmt.Printf("s:%q pp:%q start:%d end:%d keyStart:%d keyLen:%d key:%q\n", s, prefix + "..." + postfix, start, end, keyStart, keyLen, key)
+
+ for _, k := range keys {
+ if key == k {
+ var b bytes.Buffer
+ b.WriteString("circular reference in:\n")
+ for _, k1 := range keys {
+ fmt.Fprintf(&b, "%s=%s\n", k1, values[k1])
+ }
+ return "", fmt.Errorf(b.String())
+ }
+ }
+
+ val, ok := values[key]
+ if !ok {
+ val = os.Getenv(key)
+ }
+ new_val, err := expand(val, append(keys, key), prefix, postfix, values)
+ if err != nil {
+ return "", err
+ }
+ s = s[:start] + new_val + s[end+1:]
+ }
+ return s, nil
+}
+
+// encode encodes a UTF-8 string to ISO-8859-1 and escapes some characters.
+func encode(s string, special string, enc Encoding) string {
+ switch enc {
+ case UTF8:
+ return encodeUtf8(s, special)
+ case ISO_8859_1:
+ return encodeIso(s, special)
+ default:
+ panic(fmt.Sprintf("unsupported encoding %v", enc))
+ }
+}
+
+func encodeUtf8(s string, special string) string {
+ v := ""
+ for pos := 0; pos < len(s); {
+ r, w := utf8.DecodeRuneInString(s[pos:])
+ pos += w
+ v += escape(r, special)
+ }
+ return v
+}
+
+func encodeIso(s string, special string) string {
+ var r rune
+ var w int
+ var v string
+ for pos := 0; pos < len(s); {
+ switch r, w = utf8.DecodeRuneInString(s[pos:]); {
+ case r < 1<<8: // single byte rune -> escape special chars only
+ v += escape(r, special)
+ case r < 1<<16: // two byte rune -> unicode literal
+ v += fmt.Sprintf("\\u%04x", r)
+ default: // more than two bytes per rune -> can't encode
+ v += "?"
+ }
+ pos += w
+ }
+ return v
+}
+
+func escape(r rune, special string) string {
+ switch r {
+ case '\f':
+ return "\\f"
+ case '\n':
+ return "\\n"
+ case '\r':
+ return "\\r"
+ case '\t':
+ return "\\t"
+ case '\\':
+ return "\\\\"
+ default:
+ if strings.ContainsRune(special, r) {
+ return "\\" + string(r)
+ }
+ return string(r)
+ }
+}
+
+func invalidKeyError(key string) error {
+ return fmt.Errorf("unknown property: %s", key)
+}
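
Taken together, the getters above follow one pattern: `Get*` returns a caller-supplied default when the key is missing or unparseable, `MustGet*` panics via `ErrorHandler`, and every lookup goes through value expansion. A minimal sketch of how that fits together, assuming the package's default `${...}` expansion markers and the keys shown are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	p := properties.NewProperties()

	// Set reports the previous value, whether the key already existed,
	// and any expansion error (circular reference, malformed expression).
	p.Set("db.host", "localhost")
	p.Set("db.port", "5432")
	p.Set("db.url", "postgres://${db.host}:${db.port}/app") // expanded on read

	// Get* takes a default for missing or unparseable keys;
	// the MustGet* variants panic instead.
	fmt.Println(p.GetString("db.url", ""))  // postgres://localhost:5432/app
	fmt.Println(p.GetInt("db.port", 0))     // 5432
	fmt.Println(p.GetInt("db.timeout", 30)) // 30: key missing, default returned

	// FilterStripPrefix copies the "db." keys into a new Properties
	// object with the prefix removed.
	db := p.FilterStripPrefix("db.")
	fmt.Println(db.MustGetString("host")) // localhost
}
```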
diff --git a/vendor/github.com/magiconair/properties/rangecheck.go b/vendor/github.com/magiconair/properties/rangecheck.go
new file mode 100644
index 000000000..b013a2e5e
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/rangecheck.go
@@ -0,0 +1,31 @@
+// Copyright 2018 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+ "fmt"
+ "math"
+)
+
+// make this a var to overwrite it in a test
+var is32Bit = ^uint(0) == math.MaxUint32
+
+// intRangeCheck checks if the value fits into the int type and
+// panics if it does not.
+func intRangeCheck(key string, v int64) int {
+ if is32Bit && (v < math.MinInt32 || v > math.MaxInt32) {
+ panic(fmt.Sprintf("Value %d for key %s out of range", v, key))
+ }
+ return int(v)
+}
+
+// uintRangeCheck checks if the value fits into the uint type and
+// panics if it does not.
+func uintRangeCheck(key string, v uint64) uint {
+ if is32Bit && v > math.MaxUint32 {
+ panic(fmt.Sprintf("Value %d for key %s out of range", v, key))
+ }
+ return uint(v)
+}
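
These checks exist because GetInt/GetUint narrow a 64-bit parse result down to the platform-sized int/uint, which only holds 32 bits on 32-bit builds. A small self-contained sketch of the same detection trick and of the overflow case that would make those getters panic:

```go
package main

import (
	"fmt"
	"math"
)

// Same trick as rangecheck.go: on a 32-bit platform ^uint(0) has only
// 32 one-bits, so it compares equal to math.MaxUint32.
var is32Bit = ^uint(0) == math.MaxUint32

func main() {
	v := int64(math.MaxInt32) + 1 // fits in int64, but not in a 32-bit int

	if is32Bit && (v < math.MinInt32 || v > math.MaxInt32) {
		// This is the situation in which GetInt/MustGetInt panic with an
		// out-of-range error instead of silently truncating the value.
		fmt.Printf("%d does not fit into int on this platform\n", v)
		return
	}
	fmt.Printf("%d fits into int: %d\n", v, int(v))
}
```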
diff --git a/vendor/github.com/pelletier/go-toml/.dockerignore b/vendor/github.com/pelletier/go-toml/.dockerignore
new file mode 100644
index 000000000..7b5883475
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/.dockerignore
@@ -0,0 +1,2 @@
+cmd/tomll/tomll
+cmd/tomljson/tomljson
diff --git a/vendor/github.com/pelletier/go-toml/.gitignore b/vendor/github.com/pelletier/go-toml/.gitignore
new file mode 100644
index 000000000..e6ba63a5c
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/.gitignore
@@ -0,0 +1,5 @@
+test_program/test_program_bin
+fuzz/
+cmd/tomll/tomll
+cmd/tomljson/tomljson
+cmd/tomltestgen/tomltestgen
diff --git a/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md b/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md
new file mode 100644
index 000000000..98b9893d3
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md
@@ -0,0 +1,132 @@
+## Contributing
+
+Thank you for your interest in go-toml! We appreciate you considering
+contributing to go-toml!
+
+The main goal of the project is to provide an easy-to-use TOML
+implementation for Go that gets the job done and gets out of your way,
+since dealing with TOML is probably not the central piece of your project.
+
+go-toml has a single maintainer, so time is scarce. All help, big or
+small, is more than welcome!
+
+### Ask questions
+
+Any question you may have, somebody else might have it too. Always feel
+free to ask them on the [issues tracker][issues-tracker]. We will try to
+answer them as clearly and quickly as possible, time permitting.
+
+Asking questions also helps us identify areas where the documentation needs
+improvement, or new features that weren't envisioned before. Sometimes, a
+seemingly innocent question leads to the fix of a bug. Don't hesitate and
+ask away!
+
+### Improve the documentation
+
+The best way to share your knowledge and experience with go-toml is to
+improve the documentation. Fix a typo, clarify an interface, add an
+example, anything goes!
+
+The documentation is present in the [README][readme] and throughout the
+source code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a
+change to the documentation, create a pull request with your proposed
+changes. For simple changes like that, the easiest way to go is probably
+the "Fork this project and edit the file" button on Github, displayed at
+the top right of the file. Unless it's a trivial change (for example a
+typo), provide a little bit of context in your pull request description or
+commit message.
+
+### Report a bug
+
+Found a bug? Sorry to hear that :(. Help us and others track it down and
+fix it by reporting it. [File a new bug report][bug-report] on the [issues
+tracker][issues-tracker]. The template should provide enough guidance on
+what to include. When in doubt: add more details! Reducing ambiguity and
+providing more information decreases back and forth and saves everyone
+time.
+
+### Code changes
+
+Want to contribute a patch? Very happy to hear that!
+
+First, some high-level rules:
+
+* A short proposal with some POC code is better than a lengthy piece of
+ text with no code. Code speaks louder than words.
+* No backward-incompatible patch will be accepted unless discussed.
+ Sometimes it's hard, and Go's lack of versioning by default does not
+ help, but we try not to break people's programs unless we absolutely have
+ to.
+* If you are writing a new feature or extending an existing one, make sure
+ to write some documentation.
+* Bug fixes need to be accompanied by regression tests.
+* New code needs to be tested.
+* Your commit messages need to explain why the change is needed, even if
+ already included in the PR description.
+
+It does sound like a lot, but those best practices are here to save time
+overall and continuously improve the quality of the project, which is
+something everyone benefits from.
+
+#### Get started
+
+The fairly standard code contribution process looks like this:
+
+1. [Fork the project][fork].
+2. Make your changes, commit on any branch you like.
+3. [Open up a pull request][pull-request].
+4. Review, with potential requests for changes.
+5. Merge. You're in!
+
+Feel free to ask for help! You can create draft pull requests to gather
+some early feedback!
+
+#### Run the tests
+
+You can run tests for go-toml using Go's test tool: `go test ./...`.
+When you create a pull request, all tests will be run on Linux across a few Go
+versions (Travis CI), and on Windows using the latest Go version
+(AppVeyor).
+
+#### Style
+
+Try to look around and follow the same format and structure as the rest of
+the code. We enforce using `go fmt` on the whole code base.
+
+---
+
+### Maintainers-only
+
+#### Merge pull request
+
+Checklist:
+
+* Passing CI.
+* Does not introduce backward-incompatible changes (unless discussed).
+* Has relevant doc changes.
+* Has relevant unit tests.
+
+1. Merge using "squash and merge".
+2. Make sure to edit the commit message to keep all the useful information
+ nice and clean.
+3. Make sure the commit title is clear and contains the PR number (#123).
+
+#### New release
+
+1. Go to [releases][releases]. Click on "X commits to master since this
+ release".
+2. Make note of all the changes. Look for backward incompatible changes,
+ new features, and bug fixes.
+3. Pick the new version using the above and semver.
+4. Create a [new release][new-release].
+5. Follow the same format as [1.1.0][release-110].
+
+[issues-tracker]: https://github.com/pelletier/go-toml/issues
+[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md
+[pkg.go.dev]: https://pkg.go.dev/github.com/pelletier/go-toml
+[readme]: ./README.md
+[fork]: https://help.github.com/articles/fork-a-repo
+[pull-request]: https://help.github.com/en/articles/creating-a-pull-request
+[releases]: https://github.com/pelletier/go-toml/releases
+[new-release]: https://github.com/pelletier/go-toml/releases/new
+[release-110]: https://github.com/pelletier/go-toml/releases/tag/v1.1.0
diff --git a/vendor/github.com/pelletier/go-toml/Dockerfile b/vendor/github.com/pelletier/go-toml/Dockerfile
new file mode 100644
index 000000000..fffdb0166
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/Dockerfile
@@ -0,0 +1,11 @@
+FROM golang:1.12-alpine3.9 as builder
+WORKDIR /go/src/github.com/pelletier/go-toml
+COPY . .
+ENV CGO_ENABLED=0
+ENV GOOS=linux
+RUN go install ./...
+
+FROM scratch
+COPY --from=builder /go/bin/tomll /usr/bin/tomll
+COPY --from=builder /go/bin/tomljson /usr/bin/tomljson
+COPY --from=builder /go/bin/jsontoml /usr/bin/jsontoml
diff --git a/vendor/github.com/pelletier/go-toml/LICENSE b/vendor/github.com/pelletier/go-toml/LICENSE
new file mode 100644
index 000000000..f414553c2
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/LICENSE
@@ -0,0 +1,247 @@
+The bulk of github.com/pelletier/go-toml is distributed under the MIT license
+(see below), with the exception of localtime.go and localtime.test.go.
+Those two files have been copied over from Google's civil library at revision
+ed46f5086358513cf8c25f8e3f022cb838a49d66, and are distributed under the Apache
+2.0 license (see below).
+
+
+github.com/pelletier/go-toml:
+
+
+The MIT License (MIT)
+
+Copyright (c) 2013 - 2021 Thomas Pelletier, Eric Anderton
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+localtime.go, localtime_test.go:
+
+Originals:
+ https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go
+ https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil_test.go
+Changes:
+ * Renamed files from civil* to localtime*.
+ * Package changed from civil to toml.
+ * 'Local' prefix added to all structs.
+License:
+ https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/LICENSE
+
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/pelletier/go-toml/Makefile b/vendor/github.com/pelletier/go-toml/Makefile
new file mode 100644
index 000000000..9e4503aea
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/Makefile
@@ -0,0 +1,29 @@
+export CGO_ENABLED=0
+go := go
+go.goos ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f1)
+go.goarch ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f2)
+
+out.tools := tomll tomljson jsontoml
+out.dist := $(out.tools:=_$(go.goos)_$(go.goarch).tar.xz)
+sources := $(wildcard **/*.go)
+
+
+.PHONY:
+tools: $(out.tools)
+
+$(out.tools): $(sources)
+ GOOS=$(go.goos) GOARCH=$(go.goarch) $(go) build ./cmd/$@
+
+.PHONY:
+dist: $(out.dist)
+
+$(out.dist):%_$(go.goos)_$(go.goarch).tar.xz: %
+ if [ "$(go.goos)" = "windows" ]; then \
+ tar -cJf $@ $^.exe; \
+ else \
+ tar -cJf $@ $^; \
+ fi
+
+.PHONY:
+clean:
+ rm -rf $(out.tools) $(out.dist)
diff --git a/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 000000000..041cdc4a2
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,5 @@
+**Issue:** add link to pelletier/go-toml issue here
+
+Explanation of what this pull request does.
+
+More detailed description of the decisions being made and the reasons why (if the patch is non-trivial).
diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md
new file mode 100644
index 000000000..6c061712b
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/README.md
@@ -0,0 +1,176 @@
+# go-toml
+
+Go library for the [TOML](https://toml.io/) format.
+
+This library supports TOML version
+[v1.0.0-rc.3](https://toml.io/en/v1.0.0-rc.3)
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/pelletier/go-toml.svg)](https://pkg.go.dev/github.com/pelletier/go-toml)
+[![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE)
+[![Build Status](https://dev.azure.com/pelletierthomas/go-toml-ci/_apis/build/status/pelletier.go-toml?branchName=master)](https://dev.azure.com/pelletierthomas/go-toml-ci/_build/latest?definitionId=1&branchName=master)
+[![codecov](https://codecov.io/gh/pelletier/go-toml/branch/master/graph/badge.svg)](https://codecov.io/gh/pelletier/go-toml)
+[![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml)
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml?ref=badge_shield)
+
+
+## Development status
+
+**ℹ️ Consider go-toml v2!**
+
+The next version of go-toml is in [active development][v2-dev], and
+[nearing completion][v2-map].
+
+Though technically in beta, v2 is already better tested, [fixes bugs][v1-bugs],
+and is [much faster][v2-bench]. If you only need to read and write TOML documents
+(the majority of cases), those features are implemented and the API is unlikely to
+change.
+
+The remaining features (Document structure editing and tooling) will be added
+shortly. While pull-requests are welcome on v1, no active development is
+expected on it. When v2.0.0 is released, v1 will be deprecated.
+
+👉 [go-toml v2][v2]
+
+[v2]: https://github.com/pelletier/go-toml/tree/v2
+[v2-map]: https://github.com/pelletier/go-toml/discussions/506
+[v2-dev]: https://github.com/pelletier/go-toml/tree/v2
+[v1-bugs]: https://github.com/pelletier/go-toml/issues?q=is%3Aissue+is%3Aopen+label%3Av2-fixed
+[v2-bench]: https://github.com/pelletier/go-toml/tree/v2#benchmarks
+
+## Features
+
+Go-toml provides the following features for using data parsed from TOML documents:
+
+* Load TOML documents from files and string data
+* Easily navigate TOML structure using Tree
+* Marshaling and unmarshaling to and from data structures
+* Line & column position data for all parsed elements
+* [Query support similar to JSON-Path](query/)
+* Syntax errors contain line and column numbers
+
+## Import
+
+```go
+import "github.com/pelletier/go-toml"
+```
+
+## Usage example
+
+Read a TOML document:
+
+```go
+config, _ := toml.Load(`
+[postgres]
+user = "pelletier"
+password = "mypassword"`)
+// retrieve data directly
+user := config.Get("postgres.user").(string)
+
+// or using an intermediate object
+postgresConfig := config.Get("postgres").(*toml.Tree)
+password := postgresConfig.Get("password").(string)
+```
+
+Or use Unmarshal:
+
+```go
+type Postgres struct {
+ User string
+ Password string
+}
+type Config struct {
+ Postgres Postgres
+}
+
+doc := []byte(`
+[Postgres]
+User = "pelletier"
+Password = "mypassword"`)
+
+config := Config{}
+toml.Unmarshal(doc, &config)
+fmt.Println("user=", config.Postgres.User)
+```
+
+Or use a query:
+
+```go
+// use a query to gather elements without walking the tree
+q, _ := query.Compile("$..[user,password]")
+results := q.Execute(config)
+for ii, item := range results.Values() {
+ fmt.Printf("Query result %d: %v\n", ii, item)
+}
+```
+
+## Documentation
+
+The documentation and additional examples are available at
+[pkg.go.dev](https://pkg.go.dev/github.com/pelletier/go-toml).
+
+## Tools
+
+Go-toml provides three handy command line tools:
+
+* `tomll`: Reads TOML files and lints them.
+
+ ```
+ go install github.com/pelletier/go-toml/cmd/tomll
+ tomll --help
+ ```
+* `tomljson`: Reads a TOML file and outputs its JSON representation.
+
+ ```
+ go install github.com/pelletier/go-toml/cmd/tomljson
+ tomljson --help
+ ```
+
+ * `jsontoml`: Reads a JSON file and outputs a TOML representation.
+
+ ```
+ go install github.com/pelletier/go-toml/cmd/jsontoml
+ jsontoml --help
+ ```
+
+### Docker image
+
+Those tools are also available as a Docker image from
+[Docker Hub](https://hub.docker.com/r/pelletier/go-toml). For example, to
+use `tomljson`:
+
+```
+docker run -v $PWD:/workdir pelletier/go-toml tomljson /workdir/example.toml
+```
+
+Only master (`latest`) and tagged versions are published to Docker Hub. You
+can build your own image as usual:
+
+```
+docker build -t go-toml .
+```
+
+## Contribute
+
+Feel free to report bugs and submit patches using GitHub's issue and pull request
+system on [pelletier/go-toml](https://github.com/pelletier/go-toml). Any feedback
+would be much appreciated!
+
+### Run tests
+
+`go test ./...`
+
+### Fuzzing
+
+The script `./fuzz.sh` is available to
+run [go-fuzz](https://github.com/dvyukov/go-fuzz) on go-toml.
+
+## Versioning
+
+Go-toml follows [Semantic Versioning](http://semver.org/). The supported version
+of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of
+this document. The last two major versions of Go are supported
+(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)).
+
+## License
+
+The MIT License (MIT) + Apache 2.0. Read [LICENSE](LICENSE).
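
The usage examples above only go from TOML to Go values. For completeness, a minimal sketch of the reverse direction with `toml.Marshal` (the struct and values are illustrative, not taken from the README):

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml"
)

type Postgres struct {
	User     string `toml:"user"`
	Password string `toml:"password"`
}

type Config struct {
	Postgres Postgres `toml:"postgres"`
}

func main() {
	cfg := Config{Postgres: Postgres{User: "pelletier", Password: "mypassword"}}

	// Marshal renders the struct back out as a TOML document.
	doc, err := toml.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(doc))
	// prints something like:
	// [postgres]
	//   user = "pelletier"
	//   password = "mypassword"
}
```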
diff --git a/vendor/github.com/pelletier/go-toml/azure-pipelines.yml b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml
new file mode 100644
index 000000000..4af198b4d
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml
@@ -0,0 +1,188 @@
+trigger:
+- master
+
+stages:
+- stage: run_checks
+ displayName: "Check"
+ dependsOn: []
+ jobs:
+ - job: fmt
+ displayName: "fmt"
+ pool:
+ vmImage: ubuntu-latest
+ steps:
+ - task: GoTool@0
+ displayName: "Install Go 1.16"
+ inputs:
+ version: "1.16"
+ - task: Go@0
+ displayName: "go fmt ./..."
+ inputs:
+ command: 'custom'
+ customCommand: 'fmt'
+ arguments: './...'
+ - job: coverage
+ displayName: "coverage"
+ pool:
+ vmImage: ubuntu-latest
+ steps:
+ - task: GoTool@0
+ displayName: "Install Go 1.16"
+ inputs:
+ version: "1.16"
+ - task: Go@0
+ displayName: "Generate coverage"
+ inputs:
+ command: 'test'
+ arguments: "-race -coverprofile=coverage.txt -covermode=atomic"
+ - task: Bash@3
+ inputs:
+ targetType: 'inline'
+ script: 'bash <(curl -s https://codecov.io/bash) -t ${CODECOV_TOKEN}'
+ env:
+ CODECOV_TOKEN: $(CODECOV_TOKEN)
+ - job: benchmark
+ displayName: "benchmark"
+ pool:
+ vmImage: ubuntu-latest
+ steps:
+ - task: GoTool@0
+ displayName: "Install Go 1.16"
+ inputs:
+ version: "1.16"
+ - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/"
+ - task: Bash@3
+ inputs:
+ filePath: './benchmark.sh'
+ arguments: "master $(Build.Repository.Uri)"
+
+ - job: go_unit_tests
+ displayName: "unit tests"
+ strategy:
+ matrix:
+ linux 1.16:
+ goVersion: '1.16'
+ imageName: 'ubuntu-latest'
+ mac 1.16:
+ goVersion: '1.16'
+ imageName: 'macOS-latest'
+ windows 1.16:
+ goVersion: '1.16'
+ imageName: 'windows-latest'
+ linux 1.15:
+ goVersion: '1.15'
+ imageName: 'ubuntu-latest'
+ mac 1.15:
+ goVersion: '1.15'
+ imageName: 'macOS-latest'
+ windows 1.15:
+ goVersion: '1.15'
+ imageName: 'windows-latest'
+ pool:
+ vmImage: $(imageName)
+ steps:
+ - task: GoTool@0
+ displayName: "Install Go $(goVersion)"
+ inputs:
+ version: $(goVersion)
+ - task: Go@0
+ displayName: "go test ./..."
+ inputs:
+ command: 'test'
+ arguments: './...'
+- stage: build_binaries
+ displayName: "Build binaries"
+ dependsOn: run_checks
+ jobs:
+ - job: build_binary
+ displayName: "Build binary"
+ strategy:
+ matrix:
+ linux_amd64:
+ GOOS: linux
+ GOARCH: amd64
+ darwin_amd64:
+ GOOS: darwin
+ GOARCH: amd64
+ windows_amd64:
+ GOOS: windows
+ GOARCH: amd64
+ pool:
+ vmImage: ubuntu-latest
+ steps:
+ - task: GoTool@0
+ displayName: "Install Go"
+ inputs:
+ version: 1.16
+ - task: Bash@3
+ inputs:
+ targetType: inline
+ script: "make dist"
+ env:
+ go.goos: $(GOOS)
+ go.goarch: $(GOARCH)
+ - task: CopyFiles@2
+ inputs:
+ sourceFolder: '$(Build.SourcesDirectory)'
+ contents: '*.tar.xz'
+ TargetFolder: '$(Build.ArtifactStagingDirectory)'
+ - task: PublishBuildArtifacts@1
+ inputs:
+ pathtoPublish: '$(Build.ArtifactStagingDirectory)'
+ artifactName: binaries
+- stage: build_binaries_manifest
+ displayName: "Build binaries manifest"
+ dependsOn: build_binaries
+ jobs:
+ - job: build_manifest
+ displayName: "Build binaries manifest"
+ steps:
+ - task: DownloadBuildArtifacts@0
+ inputs:
+ buildType: 'current'
+ downloadType: 'single'
+ artifactName: 'binaries'
+ downloadPath: '$(Build.SourcesDirectory)'
+ - task: Bash@3
+ inputs:
+ targetType: inline
+ script: "cd binaries && sha256sum --binary *.tar.xz | tee $(Build.ArtifactStagingDirectory)/sha256sums.txt"
+ - task: PublishBuildArtifacts@1
+ inputs:
+ pathtoPublish: '$(Build.ArtifactStagingDirectory)'
+ artifactName: manifest
+
+- stage: build_docker_image
+ displayName: "Build Docker image"
+ dependsOn: run_checks
+ jobs:
+ - job: build
+ displayName: "Build"
+ pool:
+ vmImage: ubuntu-latest
+ steps:
+ - task: Docker@2
+ inputs:
+ command: 'build'
+ Dockerfile: 'Dockerfile'
+ buildContext: '.'
+ addPipelineData: false
+
+- stage: publish_docker_image
+ displayName: "Publish Docker image"
+ dependsOn: build_docker_image
+ condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master'))
+ jobs:
+ - job: publish
+ displayName: "Publish"
+ pool:
+ vmImage: ubuntu-latest
+ steps:
+ - task: Docker@2
+ inputs:
+ containerRegistry: 'DockerHub'
+ repository: 'pelletier/go-toml'
+ command: 'buildAndPush'
+ Dockerfile: 'Dockerfile'
+ buildContext: '.'
+ tags: 'latest'
diff --git a/vendor/github.com/pelletier/go-toml/benchmark.sh b/vendor/github.com/pelletier/go-toml/benchmark.sh
new file mode 100644
index 000000000..a69d3040f
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/benchmark.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+set -ex
+
+reference_ref=${1:-master}
+reference_git=${2:-.}
+
+if ! `hash benchstat 2>/dev/null`; then
+ echo "Installing benchstat"
+ go get golang.org/x/perf/cmd/benchstat
+fi
+
+tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX`
+ref_tempdir="${tempdir}/ref"
+ref_benchmark="${ref_tempdir}/benchmark-`echo -n ${reference_ref}|tr -s '/' '-'`.txt"
+local_benchmark="`pwd`/benchmark-local.txt"
+
+echo "=== ${reference_ref} (${ref_tempdir})"
+git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null
+pushd ${ref_tempdir} >/dev/null
+git checkout ${reference_ref} >/dev/null 2>/dev/null
+go test -bench=. -benchmem | tee ${ref_benchmark}
+cd benchmark
+go test -bench=. -benchmem | tee -a ${ref_benchmark}
+popd >/dev/null
+
+echo ""
+echo "=== local"
+go test -bench=. -benchmem | tee ${local_benchmark}
+cd benchmark
+go test -bench=. -benchmem | tee -a ${local_benchmark}
+
+echo ""
+echo "=== diff"
+benchstat -delta-test=none ${ref_benchmark} ${local_benchmark}
diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go
new file mode 100644
index 000000000..a1406a32b
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/doc.go
@@ -0,0 +1,23 @@
+// Package toml is a TOML parser and manipulation library.
+//
+// This version supports the specification as described in
+// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md
+//
+// Marshaling
+//
+// Go-toml can marshal and unmarshal TOML documents from and to data
+// structures.
+//
+// TOML document as a tree
+//
+// Go-toml can operate on a TOML document as a tree. Use one of the Load*
+// functions to parse TOML data and obtain a Tree instance, then one of its
+// methods to manipulate the tree.
+//
+// JSONPath-like queries
+//
+// The package github.com/pelletier/go-toml/query implements a system
+// similar to JSONPath to quickly retrieve elements of a TOML document using a
+// single expression. See the package documentation for more information.
+//
+package toml
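
As a concrete illustration of the tree workflow described in the package comment above (a sketch; the document and keys are made up):

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml"
)

func main() {
	// Load parses TOML data and returns a *toml.Tree.
	tree, err := toml.Load(`
[server]
host = "127.0.0.1"
port = 8080
`)
	if err != nil {
		panic(err)
	}

	// Values are read with dotted paths; TOML integers come back as int64.
	host := tree.Get("server.host").(string)
	port := tree.Get("server.port").(int64)
	fmt.Println(host, port)

	// The tree can be modified and rendered back to TOML.
	tree.Set("server.port", int64(9090))
	out, _ := tree.ToTomlString()
	fmt.Println(out)
}
```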
diff --git a/vendor/github.com/pelletier/go-toml/example-crlf.toml b/vendor/github.com/pelletier/go-toml/example-crlf.toml
new file mode 100644
index 000000000..780d9c68f
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/example-crlf.toml
@@ -0,0 +1,30 @@
+# This is a TOML document. Boom.
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+ # You can indent as you please. Tabs or spaces. TOML don't care.
+ [servers.alpha]
+ ip = "10.0.0.1"
+ dc = "eqdc10"
+
+ [servers.beta]
+ ip = "10.0.0.2"
+ dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
+score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported
\ No newline at end of file
diff --git a/vendor/github.com/pelletier/go-toml/example.toml b/vendor/github.com/pelletier/go-toml/example.toml
new file mode 100644
index 000000000..f45bf88b8
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/example.toml
@@ -0,0 +1,30 @@
+# This is a TOML document. Boom.
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+ # You can indent as you please. Tabs or spaces. TOML don't care.
+ [servers.alpha]
+ ip = "10.0.0.1"
+ dc = "eqdc10"
+
+ [servers.beta]
+ ip = "10.0.0.2"
+ dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
+score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported
\ No newline at end of file
diff --git a/vendor/github.com/pelletier/go-toml/fuzz.go b/vendor/github.com/pelletier/go-toml/fuzz.go
new file mode 100644
index 000000000..14570c8d3
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/fuzz.go
@@ -0,0 +1,31 @@
+// +build gofuzz
+
+package toml
+
+func Fuzz(data []byte) int {
+ tree, err := LoadBytes(data)
+ if err != nil {
+ if tree != nil {
+ panic("tree must be nil if there is an error")
+ }
+ return 0
+ }
+
+ str, err := tree.ToTomlString()
+ if err != nil {
+ if str != "" {
+ panic(`str must be "" if there is an error`)
+ }
+ panic(err)
+ }
+
+ tree, err = Load(str)
+ if err != nil {
+ if tree != nil {
+ panic("tree must be nil if there is an error")
+ }
+ return 0
+ }
+
+ return 1
+}
diff --git a/vendor/github.com/pelletier/go-toml/fuzz.sh b/vendor/github.com/pelletier/go-toml/fuzz.sh
new file mode 100644
index 000000000..3204b4c44
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/fuzz.sh
@@ -0,0 +1,15 @@
+#! /bin/sh
+set -eu
+
+go get github.com/dvyukov/go-fuzz/go-fuzz
+go get github.com/dvyukov/go-fuzz/go-fuzz-build
+
+if [ ! -e toml-fuzz.zip ]; then
+ go-fuzz-build github.com/pelletier/go-toml
+fi
+
+rm -fr fuzz
+mkdir -p fuzz/corpus
+cp *.toml fuzz/corpus
+
+go-fuzz -bin=toml-fuzz.zip -workdir=fuzz
diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go
new file mode 100644
index 000000000..e091500b2
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/keysparsing.go
@@ -0,0 +1,112 @@
+// Key parsing, handling both bare and quoted keys.
+
+package toml
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Convert the key group string to an array of its dotted parts.
+// The input supports double-quoted and single-quoted segments,
+// but escape sequences are not supported; lexers must unescape them beforehand.
+func parseKey(key string) ([]string, error) {
+ runes := []rune(key)
+ var groups []string
+
+ if len(key) == 0 {
+ return nil, errors.New("empty key")
+ }
+
+ idx := 0
+ for idx < len(runes) {
+ for ; idx < len(runes) && isSpace(runes[idx]); idx++ {
+ // skip leading whitespace
+ }
+ if idx >= len(runes) {
+ break
+ }
+ r := runes[idx]
+ if isValidBareChar(r) {
+ // parse bare key
+ startIdx := idx
+ endIdx := -1
+ idx++
+ for idx < len(runes) {
+ r = runes[idx]
+ if isValidBareChar(r) {
+ idx++
+ } else if r == '.' {
+ endIdx = idx
+ break
+ } else if isSpace(r) {
+ endIdx = idx
+ for ; idx < len(runes) && isSpace(runes[idx]); idx++ {
+ // skip trailing whitespace
+ }
+ if idx < len(runes) && runes[idx] != '.' {
+ return nil, fmt.Errorf("invalid key character after whitespace: %c", runes[idx])
+ }
+ break
+ } else {
+ return nil, fmt.Errorf("invalid bare key character: %c", r)
+ }
+ }
+ if endIdx == -1 {
+ endIdx = idx
+ }
+ groups = append(groups, string(runes[startIdx:endIdx]))
+ } else if r == '\'' {
+ // parse single quoted key
+ idx++
+ startIdx := idx
+ for {
+ if idx >= len(runes) {
+ return nil, fmt.Errorf("unclosed single-quoted key")
+ }
+ r = runes[idx]
+ if r == '\'' {
+ groups = append(groups, string(runes[startIdx:idx]))
+ idx++
+ break
+ }
+ idx++
+ }
+ } else if r == '"' {
+ // parse double quoted key
+ idx++
+ startIdx := idx
+ for {
+ if idx >= len(runes) {
+ return nil, fmt.Errorf("unclosed double-quoted key")
+ }
+ r = runes[idx]
+ if r == '"' {
+ groups = append(groups, string(runes[startIdx:idx]))
+ idx++
+ break
+ }
+ idx++
+ }
+ } else if r == '.' {
+ idx++
+ if idx >= len(runes) {
+ return nil, fmt.Errorf("unexpected end of key")
+ }
+ r = runes[idx]
+ if !isValidBareChar(r) && r != '\'' && r != '"' && r != ' ' {
+ return nil, fmt.Errorf("expecting key part after dot")
+ }
+ } else {
+ return nil, fmt.Errorf("invalid key character: %c", r)
+ }
+ }
+ if len(groups) == 0 {
+ return nil, fmt.Errorf("empty key")
+ }
+ return groups, nil
+}
+
+func isValidBareChar(r rune) bool {
+ return isAlphanumeric(r) || r == '-' || isDigit(r)
+}
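
Because parseKey is unexported, it can only be exercised from within the package. A sketch of a table-style test (hypothetical, not part of the vendored file) showing how bare, dotted, whitespace-separated, and quoted keys are split into groups by the logic above:

```go
package toml

import (
	"reflect"
	"testing"
)

func TestParseKeyGroups(t *testing.T) {
	cases := []struct {
		key  string
		want []string
	}{
		{`plain`, []string{"plain"}},
		{`a.b.c`, []string{"a", "b", "c"}},
		{`a . b`, []string{"a", "b"}}, // whitespace around the dot is skipped
		{`"quoted.key".rest`, []string{"quoted.key", "rest"}}, // dots inside quotes are literal
		{`'single'.x`, []string{"single", "x"}},
	}
	for _, c := range cases {
		got, err := parseKey(c.key)
		if err != nil {
			t.Fatalf("parseKey(%q): %v", c.key, err)
		}
		if !reflect.DeepEqual(got, c.want) {
			t.Errorf("parseKey(%q) = %v, want %v", c.key, got, c.want)
		}
	}
}
```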
diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go
new file mode 100644
index 000000000..313908e3e
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/lexer.go
@@ -0,0 +1,1031 @@
+// TOML lexer.
+//
+// Written using the principles developed by Rob Pike in
+// http://www.youtube.com/watch?v=HxaD_trXwRE
+
+package toml
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Define state functions
+type tomlLexStateFn func() tomlLexStateFn
+
+// Define lexer
+type tomlLexer struct {
+ inputIdx int
+ input []rune // Textual source
+ currentTokenStart int
+ currentTokenStop int
+ tokens []token
+ brackets []rune
+ line int
+ col int
+ endbufferLine int
+ endbufferCol int
+}
+
+// Basic read operations on input
+
+func (l *tomlLexer) read() rune {
+ r := l.peek()
+ if r == '\n' {
+ l.endbufferLine++
+ l.endbufferCol = 1
+ } else {
+ l.endbufferCol++
+ }
+ l.inputIdx++
+ return r
+}
+
+func (l *tomlLexer) next() rune {
+ r := l.read()
+
+ if r != eof {
+ l.currentTokenStop++
+ }
+ return r
+}
+
+func (l *tomlLexer) ignore() {
+ l.currentTokenStart = l.currentTokenStop
+ l.line = l.endbufferLine
+ l.col = l.endbufferCol
+}
+
+func (l *tomlLexer) skip() {
+ l.next()
+ l.ignore()
+}
+
+func (l *tomlLexer) fastForward(n int) {
+ for i := 0; i < n; i++ {
+ l.next()
+ }
+}
+
+func (l *tomlLexer) emitWithValue(t tokenType, value string) {
+ l.tokens = append(l.tokens, token{
+ Position: Position{l.line, l.col},
+ typ: t,
+ val: value,
+ })
+ l.ignore()
+}
+
+func (l *tomlLexer) emit(t tokenType) {
+ l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop]))
+}
+
+func (l *tomlLexer) peek() rune {
+ if l.inputIdx >= len(l.input) {
+ return eof
+ }
+ return l.input[l.inputIdx]
+}
+
+func (l *tomlLexer) peekString(size int) string {
+ maxIdx := len(l.input)
+ upperIdx := l.inputIdx + size // FIXME: potential overflow
+ if upperIdx > maxIdx {
+ upperIdx = maxIdx
+ }
+ return string(l.input[l.inputIdx:upperIdx])
+}
+
+func (l *tomlLexer) follow(next string) bool {
+ return next == l.peekString(len(next))
+}
+
+// Error management
+
+func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn {
+ l.tokens = append(l.tokens, token{
+ Position: Position{l.line, l.col},
+ typ: tokenError,
+ val: fmt.Sprintf(format, args...),
+ })
+ return nil
+}
+
+// State functions
+
+func (l *tomlLexer) lexVoid() tomlLexStateFn {
+ for {
+ next := l.peek()
+ switch next {
+ case '}': // after '{'
+ return l.lexRightCurlyBrace
+ case '[':
+ return l.lexTableKey
+ case '#':
+ return l.lexComment(l.lexVoid)
+ case '=':
+ return l.lexEqual
+ case '\r':
+ fallthrough
+ case '\n':
+ l.skip()
+ continue
+ }
+
+ if isSpace(next) {
+ l.skip()
+ }
+
+ if isKeyStartChar(next) {
+ return l.lexKey
+ }
+
+ if next == eof {
+ l.next()
+ break
+ }
+ }
+
+ l.emit(tokenEOF)
+ return nil
+}
+
+func (l *tomlLexer) lexRvalue() tomlLexStateFn {
+ for {
+ next := l.peek()
+ switch next {
+ case '.':
+ return l.errorf("cannot start float with a dot")
+ case '=':
+ return l.lexEqual
+ case '[':
+ return l.lexLeftBracket
+ case ']':
+ return l.lexRightBracket
+ case '{':
+ return l.lexLeftCurlyBrace
+ case '}':
+ return l.lexRightCurlyBrace
+ case '#':
+ return l.lexComment(l.lexRvalue)
+ case '"':
+ return l.lexString
+ case '\'':
+ return l.lexLiteralString
+ case ',':
+ return l.lexComma
+ case '\r':
+ fallthrough
+ case '\n':
+ l.skip()
+ if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '[' {
+ return l.lexRvalue
+ }
+ return l.lexVoid
+ }
+
+ if l.follow("true") {
+ return l.lexTrue
+ }
+
+ if l.follow("false") {
+ return l.lexFalse
+ }
+
+ if l.follow("inf") {
+ return l.lexInf
+ }
+
+ if l.follow("nan") {
+ return l.lexNan
+ }
+
+ if isSpace(next) {
+ l.skip()
+ continue
+ }
+
+ if next == eof {
+ l.next()
+ break
+ }
+
+ if next == '+' || next == '-' {
+ return l.lexNumber
+ }
+
+ if isDigit(next) {
+ return l.lexDateTimeOrNumber
+ }
+
+ return l.errorf("no value can start with %c", next)
+ }
+
+ l.emit(tokenEOF)
+ return nil
+}
+
+func (l *tomlLexer) lexDateTimeOrNumber() tomlLexStateFn {
+ // Could be either a date/time, or a digit.
+ // The options for date/times are:
+ // YYYY-... => date or date-time
+ // HH:... => time
+ // Anything else should be a number.
+
+ lookAhead := l.peekString(5)
+ if len(lookAhead) < 3 {
+ return l.lexNumber()
+ }
+
+ for idx, r := range lookAhead {
+ if !isDigit(r) {
+ if idx == 2 && r == ':' {
+ return l.lexDateTimeOrTime()
+ }
+ if idx == 4 && r == '-' {
+ return l.lexDateTimeOrTime()
+ }
+ return l.lexNumber()
+ }
+ }
+ return l.lexNumber()
+}
+
+func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn {
+ l.next()
+ l.emit(tokenLeftCurlyBrace)
+ l.brackets = append(l.brackets, '{')
+ return l.lexVoid
+}
+
+func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn {
+ l.next()
+ l.emit(tokenRightCurlyBrace)
+ if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '{' {
+ return l.errorf("cannot have '}' here")
+ }
+ l.brackets = l.brackets[:len(l.brackets)-1]
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexDateTimeOrTime() tomlLexStateFn {
+ // Example matches:
+ // 1979-05-27T07:32:00Z
+ // 1979-05-27T00:32:00-07:00
+ // 1979-05-27T00:32:00.999999-07:00
+ // 1979-05-27 07:32:00Z
+ // 1979-05-27 00:32:00-07:00
+ // 1979-05-27 00:32:00.999999-07:00
+ // 1979-05-27T07:32:00
+ // 1979-05-27T00:32:00.999999
+ // 1979-05-27 07:32:00
+ // 1979-05-27 00:32:00.999999
+ // 1979-05-27
+ // 07:32:00
+ // 00:32:00.999999
+
+ // we already know those two are digits
+ l.next()
+ l.next()
+
+ // Got 2 digits. At that point it could be either a time or a date(-time).
+
+ r := l.next()
+ if r == ':' {
+ return l.lexTime()
+ }
+
+ return l.lexDateTime()
+}
+
+func (l *tomlLexer) lexDateTime() tomlLexStateFn {
+ // This state accepts an offset date-time, a local date-time, or a local date.
+ //
+ // v--- cursor
+ // 1979-05-27T07:32:00Z
+ // 1979-05-27T00:32:00-07:00
+ // 1979-05-27T00:32:00.999999-07:00
+ // 1979-05-27 07:32:00Z
+ // 1979-05-27 00:32:00-07:00
+ // 1979-05-27 00:32:00.999999-07:00
+ // 1979-05-27T07:32:00
+ // 1979-05-27T00:32:00.999999
+ // 1979-05-27 07:32:00
+ // 1979-05-27 00:32:00.999999
+ // 1979-05-27
+
+ // date
+
+ // already checked by lexRvalue
+ l.next() // digit
+ l.next() // -
+
+ for i := 0; i < 2; i++ {
+ r := l.next()
+ if !isDigit(r) {
+ return l.errorf("invalid month digit in date: %c", r)
+ }
+ }
+
+ r := l.next()
+ if r != '-' {
+ return l.errorf("expected - to separate month of a date, not %c", r)
+ }
+
+ for i := 0; i < 2; i++ {
+ r := l.next()
+ if !isDigit(r) {
+ return l.errorf("invalid day digit in date: %c", r)
+ }
+ }
+
+ l.emit(tokenLocalDate)
+
+ r = l.peek()
+
+	if r == eof {
+		return l.lexRvalue
+	}
+
+ if r != ' ' && r != 'T' {
+ return l.errorf("incorrect date/time separation character: %c", r)
+ }
+
+ if r == ' ' {
+ lookAhead := l.peekString(3)[1:]
+ if len(lookAhead) < 2 {
+ return l.lexRvalue
+ }
+ for _, r := range lookAhead {
+ if !isDigit(r) {
+ return l.lexRvalue
+ }
+ }
+ }
+
+ l.skip() // skip the T or ' '
+
+ // time
+
+ for i := 0; i < 2; i++ {
+ r := l.next()
+ if !isDigit(r) {
+ return l.errorf("invalid hour digit in time: %c", r)
+ }
+ }
+
+ r = l.next()
+ if r != ':' {
+ return l.errorf("time hour/minute separator should be :, not %c", r)
+ }
+
+ for i := 0; i < 2; i++ {
+ r := l.next()
+ if !isDigit(r) {
+ return l.errorf("invalid minute digit in time: %c", r)
+ }
+ }
+
+ r = l.next()
+ if r != ':' {
+ return l.errorf("time minute/second separator should be :, not %c", r)
+ }
+
+ for i := 0; i < 2; i++ {
+ r := l.next()
+ if !isDigit(r) {
+ return l.errorf("invalid second digit in time: %c", r)
+ }
+ }
+
+ r = l.peek()
+ if r == '.' {
+ l.next()
+ r := l.next()
+ if !isDigit(r) {
+ return l.errorf("expected at least one digit in time's fraction, not %c", r)
+ }
+
+ for {
+ r := l.peek()
+ if !isDigit(r) {
+ break
+ }
+ l.next()
+ }
+ }
+
+ l.emit(tokenLocalTime)
+
+	return l.lexTimeOffset
+}
+
+func (l *tomlLexer) lexTimeOffset() tomlLexStateFn {
+ // potential offset
+
+ // Z
+ // -07:00
+ // +07:00
+ // nothing
+
+ r := l.peek()
+
+ if r == 'Z' {
+ l.next()
+ l.emit(tokenTimeOffset)
+ } else if r == '+' || r == '-' {
+ l.next()
+
+ for i := 0; i < 2; i++ {
+ r := l.next()
+ if !isDigit(r) {
+ return l.errorf("invalid hour digit in time offset: %c", r)
+ }
+ }
+
+ r = l.next()
+ if r != ':' {
+ return l.errorf("time offset hour/minute separator should be :, not %c", r)
+ }
+
+ for i := 0; i < 2; i++ {
+ r := l.next()
+ if !isDigit(r) {
+ return l.errorf("invalid minute digit in time offset: %c", r)
+ }
+ }
+
+ l.emit(tokenTimeOffset)
+ }
+
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexTime() tomlLexStateFn {
+ // v--- cursor
+ // 07:32:00
+ // 00:32:00.999999
+
+ for i := 0; i < 2; i++ {
+ r := l.next()
+ if !isDigit(r) {
+ return l.errorf("invalid minute digit in time: %c", r)
+ }
+ }
+
+ r := l.next()
+ if r != ':' {
+ return l.errorf("time minute/second separator should be :, not %c", r)
+ }
+
+ for i := 0; i < 2; i++ {
+ r := l.next()
+ if !isDigit(r) {
+ return l.errorf("invalid second digit in time: %c", r)
+ }
+ }
+
+ r = l.peek()
+ if r == '.' {
+ l.next()
+ r := l.next()
+ if !isDigit(r) {
+ return l.errorf("expected at least one digit in time's fraction, not %c", r)
+ }
+
+ for {
+ r := l.peek()
+ if !isDigit(r) {
+ break
+ }
+ l.next()
+ }
+ }
+
+ l.emit(tokenLocalTime)
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) lexTrue() tomlLexStateFn {
+ l.fastForward(4)
+ l.emit(tokenTrue)
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexFalse() tomlLexStateFn {
+ l.fastForward(5)
+ l.emit(tokenFalse)
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexInf() tomlLexStateFn {
+ l.fastForward(3)
+ l.emit(tokenInf)
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexNan() tomlLexStateFn {
+ l.fastForward(3)
+ l.emit(tokenNan)
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexEqual() tomlLexStateFn {
+ l.next()
+ l.emit(tokenEqual)
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexComma() tomlLexStateFn {
+ l.next()
+ l.emit(tokenComma)
+ if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '{' {
+ return l.lexVoid
+ }
+ return l.lexRvalue
+}
+
+// Parse the key and emit its value without escape sequences.
+// Bare keys, basic string keys, and literal string keys are supported.
+func (l *tomlLexer) lexKey() tomlLexStateFn {
+ var sb strings.Builder
+
+ for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() {
+ if r == '"' {
+ l.next()
+ str, err := l.lexStringAsString(`"`, false, true)
+ if err != nil {
+ return l.errorf(err.Error())
+ }
+ sb.WriteString("\"")
+ sb.WriteString(str)
+ sb.WriteString("\"")
+ l.next()
+ continue
+ } else if r == '\'' {
+ l.next()
+ str, err := l.lexLiteralStringAsString(`'`, false)
+ if err != nil {
+ return l.errorf(err.Error())
+ }
+ sb.WriteString("'")
+ sb.WriteString(str)
+ sb.WriteString("'")
+ l.next()
+ continue
+ } else if r == '\n' {
+ return l.errorf("keys cannot contain new lines")
+ } else if isSpace(r) {
+ var str strings.Builder
+ str.WriteString(" ")
+
+ // skip trailing whitespace
+ l.next()
+ for r = l.peek(); isSpace(r); r = l.peek() {
+ str.WriteRune(r)
+ l.next()
+ }
+ // break loop if not a dot
+ if r != '.' {
+ break
+ }
+ str.WriteString(".")
+ // skip trailing whitespace after dot
+ l.next()
+ for r = l.peek(); isSpace(r); r = l.peek() {
+ str.WriteRune(r)
+ l.next()
+ }
+ sb.WriteString(str.String())
+ continue
+ } else if r == '.' {
+ // skip
+ } else if !isValidBareChar(r) {
+ return l.errorf("keys cannot contain %c character", r)
+ }
+ sb.WriteRune(r)
+ l.next()
+ }
+ l.emitWithValue(tokenKey, sb.String())
+ return l.lexVoid
+}
+
+func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn {
+ return func() tomlLexStateFn {
+ for next := l.peek(); next != '\n' && next != eof; next = l.peek() {
+ if next == '\r' && l.follow("\r\n") {
+ break
+ }
+ l.next()
+ }
+ l.ignore()
+ return previousState
+ }
+}
+
+func (l *tomlLexer) lexLeftBracket() tomlLexStateFn {
+ l.next()
+ l.emit(tokenLeftBracket)
+ l.brackets = append(l.brackets, '[')
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) {
+ var sb strings.Builder
+
+ if discardLeadingNewLine {
+ if l.follow("\r\n") {
+ l.skip()
+ l.skip()
+ } else if l.peek() == '\n' {
+ l.skip()
+ }
+ }
+
+ // find end of string
+ for {
+ if l.follow(terminator) {
+ return sb.String(), nil
+ }
+
+ next := l.peek()
+ if next == eof {
+ break
+ }
+ sb.WriteRune(l.next())
+ }
+
+ return "", errors.New("unclosed string")
+}
+
+func (l *tomlLexer) lexLiteralString() tomlLexStateFn {
+ l.skip()
+
+ // handle special case for triple-quote
+ terminator := "'"
+ discardLeadingNewLine := false
+ if l.follow("''") {
+ l.skip()
+ l.skip()
+ terminator = "'''"
+ discardLeadingNewLine = true
+ }
+
+ str, err := l.lexLiteralStringAsString(terminator, discardLeadingNewLine)
+ if err != nil {
+ return l.errorf(err.Error())
+ }
+
+ l.emitWithValue(tokenString, str)
+ l.fastForward(len(terminator))
+ l.ignore()
+ return l.lexRvalue
+}
+
+// Lex a string and return the result as a string.
+// Terminator is the substring indicating the end of the token.
+// The resulting string does not include the terminator.
+func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) {
+ var sb strings.Builder
+
+ if discardLeadingNewLine {
+ if l.follow("\r\n") {
+ l.skip()
+ l.skip()
+ } else if l.peek() == '\n' {
+ l.skip()
+ }
+ }
+
+ for {
+ if l.follow(terminator) {
+ return sb.String(), nil
+ }
+
+ if l.follow("\\") {
+ l.next()
+ switch l.peek() {
+ case '\r':
+ fallthrough
+ case '\n':
+ fallthrough
+ case '\t':
+ fallthrough
+ case ' ':
+ // skip all whitespace chars following backslash
+ for strings.ContainsRune("\r\n\t ", l.peek()) {
+ l.next()
+ }
+ case '"':
+ sb.WriteString("\"")
+ l.next()
+ case 'n':
+ sb.WriteString("\n")
+ l.next()
+ case 'b':
+ sb.WriteString("\b")
+ l.next()
+ case 'f':
+ sb.WriteString("\f")
+ l.next()
+ case '/':
+ sb.WriteString("/")
+ l.next()
+ case 't':
+ sb.WriteString("\t")
+ l.next()
+ case 'r':
+ sb.WriteString("\r")
+ l.next()
+ case '\\':
+ sb.WriteString("\\")
+ l.next()
+ case 'u':
+ l.next()
+ var code strings.Builder
+ for i := 0; i < 4; i++ {
+ c := l.peek()
+ if !isHexDigit(c) {
+ return "", errors.New("unfinished unicode escape")
+ }
+ l.next()
+ code.WriteRune(c)
+ }
+ intcode, err := strconv.ParseInt(code.String(), 16, 32)
+ if err != nil {
+ return "", errors.New("invalid unicode escape: \\u" + code.String())
+ }
+ sb.WriteRune(rune(intcode))
+ case 'U':
+ l.next()
+ var code strings.Builder
+ for i := 0; i < 8; i++ {
+ c := l.peek()
+ if !isHexDigit(c) {
+ return "", errors.New("unfinished unicode escape")
+ }
+ l.next()
+ code.WriteRune(c)
+ }
+ intcode, err := strconv.ParseInt(code.String(), 16, 64)
+ if err != nil {
+ return "", errors.New("invalid unicode escape: \\U" + code.String())
+ }
+ sb.WriteRune(rune(intcode))
+ default:
+ return "", errors.New("invalid escape sequence: \\" + string(l.peek()))
+ }
+ } else {
+ r := l.peek()
+
+ if 0x00 <= r && r <= 0x1F && r != '\t' && !(acceptNewLines && (r == '\n' || r == '\r')) {
+ return "", fmt.Errorf("unescaped control character %U", r)
+ }
+ l.next()
+ sb.WriteRune(r)
+ }
+
+ if l.peek() == eof {
+ break
+ }
+ }
+
+ return "", errors.New("unclosed string")
+}
+
+func (l *tomlLexer) lexString() tomlLexStateFn {
+ l.skip()
+
+ // handle special case for triple-quote
+ terminator := `"`
+ discardLeadingNewLine := false
+ acceptNewLines := false
+ if l.follow(`""`) {
+ l.skip()
+ l.skip()
+ terminator = `"""`
+ discardLeadingNewLine = true
+ acceptNewLines = true
+ }
+
+ str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines)
+ if err != nil {
+ return l.errorf(err.Error())
+ }
+
+ l.emitWithValue(tokenString, str)
+ l.fastForward(len(terminator))
+ l.ignore()
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexTableKey() tomlLexStateFn {
+ l.next()
+
+ if l.peek() == '[' {
+ // token '[[' signifies an array of tables
+ l.next()
+ l.emit(tokenDoubleLeftBracket)
+ return l.lexInsideTableArrayKey
+ }
+ // vanilla table key
+ l.emit(tokenLeftBracket)
+ return l.lexInsideTableKey
+}
+
+// Parse the key till "]]", but only bare keys are supported
+func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn {
+ for r := l.peek(); r != eof; r = l.peek() {
+ switch r {
+ case ']':
+ if l.currentTokenStop > l.currentTokenStart {
+ l.emit(tokenKeyGroupArray)
+ }
+ l.next()
+ if l.peek() != ']' {
+ break
+ }
+ l.next()
+ l.emit(tokenDoubleRightBracket)
+ return l.lexVoid
+ case '[':
+			return l.errorf("table array key cannot contain '['")
+ default:
+ l.next()
+ }
+ }
+ return l.errorf("unclosed table array key")
+}
+
+// Parse the key till "]" but only bare keys are supported
+func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn {
+ for r := l.peek(); r != eof; r = l.peek() {
+ switch r {
+ case ']':
+ if l.currentTokenStop > l.currentTokenStart {
+ l.emit(tokenKeyGroup)
+ }
+ l.next()
+ l.emit(tokenRightBracket)
+ return l.lexVoid
+ case '[':
+			return l.errorf("table key cannot contain '['")
+ default:
+ l.next()
+ }
+ }
+ return l.errorf("unclosed table key")
+}
+
+func (l *tomlLexer) lexRightBracket() tomlLexStateFn {
+ l.next()
+ l.emit(tokenRightBracket)
+ if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '[' {
+ return l.errorf("cannot have ']' here")
+ }
+ l.brackets = l.brackets[:len(l.brackets)-1]
+ return l.lexRvalue
+}
+
+type validRuneFn func(r rune) bool
+
+func isValidHexRune(r rune) bool {
+ return r >= 'a' && r <= 'f' ||
+ r >= 'A' && r <= 'F' ||
+ r >= '0' && r <= '9' ||
+ r == '_'
+}
+
+func isValidOctalRune(r rune) bool {
+ return r >= '0' && r <= '7' || r == '_'
+}
+
+func isValidBinaryRune(r rune) bool {
+ return r == '0' || r == '1' || r == '_'
+}
+
+func (l *tomlLexer) lexNumber() tomlLexStateFn {
+ r := l.peek()
+
+ if r == '0' {
+ follow := l.peekString(2)
+ if len(follow) == 2 {
+ var isValidRune validRuneFn
+ switch follow[1] {
+ case 'x':
+ isValidRune = isValidHexRune
+ case 'o':
+ isValidRune = isValidOctalRune
+ case 'b':
+ isValidRune = isValidBinaryRune
+ default:
+ if follow[1] >= 'a' && follow[1] <= 'z' || follow[1] >= 'A' && follow[1] <= 'Z' {
+ return l.errorf("unknown number base: %s. possible options are x (hex) o (octal) b (binary)", string(follow[1]))
+ }
+ }
+
+ if isValidRune != nil {
+ l.next()
+ l.next()
+ digitSeen := false
+ for {
+ next := l.peek()
+ if !isValidRune(next) {
+ break
+ }
+ digitSeen = true
+ l.next()
+ }
+
+ if !digitSeen {
+ return l.errorf("number needs at least one digit")
+ }
+
+ l.emit(tokenInteger)
+
+ return l.lexRvalue
+ }
+ }
+ }
+
+ if r == '+' || r == '-' {
+ l.next()
+ if l.follow("inf") {
+ return l.lexInf
+ }
+ if l.follow("nan") {
+ return l.lexNan
+ }
+ }
+
+ pointSeen := false
+ expSeen := false
+ digitSeen := false
+ for {
+ next := l.peek()
+ if next == '.' {
+ if pointSeen {
+ return l.errorf("cannot have two dots in one float")
+ }
+ l.next()
+ if !isDigit(l.peek()) {
+ return l.errorf("float cannot end with a dot")
+ }
+ pointSeen = true
+ } else if next == 'e' || next == 'E' {
+ expSeen = true
+ l.next()
+ r := l.peek()
+ if r == '+' || r == '-' {
+ l.next()
+ }
+ } else if isDigit(next) {
+ digitSeen = true
+ l.next()
+ } else if next == '_' {
+ l.next()
+ } else {
+ break
+ }
+ if pointSeen && !digitSeen {
+ return l.errorf("cannot start float with a dot")
+ }
+ }
+
+ if !digitSeen {
+ return l.errorf("no digit in that number")
+ }
+ if pointSeen || expSeen {
+ l.emit(tokenFloat)
+ } else {
+ l.emit(tokenInteger)
+ }
+ return l.lexRvalue
+}
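+
+// Illustrative sketch (not part of the upstream file): number forms accepted by
+// lexNumber above. The keys are arbitrary.
+//
+//	answer = 42            # plain decimal integer
+//	hex    = 0xDEAD_BEEF   # base prefix (x, o or b) with optional underscores
+//	pi     = 3.14_15e+00   # float with fraction, underscores and exponent
+//	neg    = -inf          # leading sign followed by a special float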
+
+func (l *tomlLexer) run() {
+ for state := l.lexVoid; state != nil; {
+ state = state()
+ }
+}
+
+// Entry point
+func lexToml(inputBytes []byte) []token {
+ runes := bytes.Runes(inputBytes)
+ l := &tomlLexer{
+ input: runes,
+ tokens: make([]token, 0, 256),
+ line: 1,
+ col: 1,
+ endbufferLine: 1,
+ endbufferCol: 1,
+ }
+ l.run()
+ return l.tokens
+}
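+
+// Illustrative sketch (not part of the upstream file): lexToml drives the state
+// machine above, starting in lexVoid and hopping between state functions until
+// one returns nil, then returning the collected tokens. For an input such as
+// `name = "gotosocial"` the emitted tokens are, in order, tokenKey ("name"),
+// tokenEqual, tokenString ("gotosocial") and a trailing tokenEOF.
+//
+//	toks := lexToml([]byte(`name = "gotosocial"`))
+//	_ = toks // tokenKey, tokenEqual, tokenString, tokenEOF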
diff --git a/vendor/github.com/pelletier/go-toml/localtime.go b/vendor/github.com/pelletier/go-toml/localtime.go
new file mode 100644
index 000000000..9dfe4b9e6
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/localtime.go
@@ -0,0 +1,287 @@
+// Implementation of TOML's local date/time.
+//
+// Copied over from Google's civil to avoid pulling all the Google dependencies.
+// Originals:
+// https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go
+// Changes:
+// * Renamed files from civil* to localtime*.
+// * Package changed from civil to toml.
+// * 'Local' prefix added to all structs.
+//
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The types in this file implement civil time, a time-zone-independent
+// representation of time that follows the rules of the proleptic
+// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second
+// minutes.
+//
+// Because they lack location information, these types do not represent unique
+// moments or intervals of time. Use time.Time for that purpose.
+package toml
+
+import (
+ "fmt"
+ "time"
+)
+
+// A LocalDate represents a date (year, month, day).
+//
+// This type does not include location information, and therefore does not
+// describe a unique 24-hour timespan.
+type LocalDate struct {
+ Year int // Year (e.g., 2014).
+ Month time.Month // Month of the year (January = 1, ...).
+ Day int // Day of the month, starting at 1.
+}
+
+// LocalDateOf returns the LocalDate in which a time occurs in that time's location.
+func LocalDateOf(t time.Time) LocalDate {
+ var d LocalDate
+ d.Year, d.Month, d.Day = t.Date()
+ return d
+}
+
+// ParseLocalDate parses a string in RFC3339 full-date format and returns the date value it represents.
+func ParseLocalDate(s string) (LocalDate, error) {
+ t, err := time.Parse("2006-01-02", s)
+ if err != nil {
+ return LocalDate{}, err
+ }
+ return LocalDateOf(t), nil
+}
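+
+// Illustrative sketch (not part of the upstream file): parsing and formatting a
+// LocalDate. The date value is arbitrary.
+//
+//	d, err := ParseLocalDate("1979-05-27")
+//	if err != nil {
+//		// handle the parse error
+//	}
+//	_ = d.String() // "1979-05-27"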
+
+// String returns the date in RFC3339 full-date format.
+func (d LocalDate) String() string {
+ return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day)
+}
+
+// IsValid reports whether the date is valid.
+func (d LocalDate) IsValid() bool {
+ return LocalDateOf(d.In(time.UTC)) == d
+}
+
+// In returns the time corresponding to time 00:00:00 of the date in the location.
+//
+// In is always consistent with time.Date, even when time.Date returns a time
+// on a different day. For example, if loc is America/Indiana/Vincennes, then both
+//     time.Date(1955, time.May, 1, 0, 0, 0, 0, loc)
+// and
+//     LocalDate{Year: 1955, Month: time.May, Day: 1}.In(loc)
+// return 23:00:00 on April 30, 1955.
+//
+// In panics if loc is nil.
+func (d LocalDate) In(loc *time.Location) time.Time {
+ return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc)
+}
+
+// AddDays returns the date that is n days in the future.
+// n can also be negative to go into the past.
+func (d LocalDate) AddDays(n int) LocalDate {
+ return LocalDateOf(d.In(time.UTC).AddDate(0, 0, n))
+}
+
+// DaysSince returns the signed number of days between the date and s, not including the end day.
+// This is the inverse operation to AddDays.
+func (d LocalDate) DaysSince(s LocalDate) (days int) {
+ // We convert to Unix time so we do not have to worry about leap seconds:
+ // Unix time increases by exactly 86400 seconds per day.
+ deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix()
+ return int(deltaUnix / 86400)
+}
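+
+// Illustrative sketch (not part of the upstream file): AddDays and DaysSince are
+// inverse operations. The dates are arbitrary.
+//
+//	start := LocalDate{Year: 2021, Month: time.December, Day: 7}
+//	later := start.AddDays(30)   // 2022-01-06
+//	_ = later.DaysSince(start)   // 30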
+
+// Before reports whether d1 occurs before d2.
+func (d1 LocalDate) Before(d2 LocalDate) bool {
+ if d1.Year != d2.Year {
+ return d1.Year < d2.Year
+ }
+ if d1.Month != d2.Month {
+ return d1.Month < d2.Month
+ }
+ return d1.Day < d2.Day
+}
+
+// After reports whether d1 occurs after d2.
+func (d1 LocalDate) After(d2 LocalDate) bool {
+ return d2.Before(d1)
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The output is the result of d.String().
+func (d LocalDate) MarshalText() ([]byte, error) {
+ return []byte(d.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The date is expected to be a string in a format accepted by ParseLocalDate.
+func (d *LocalDate) UnmarshalText(data []byte) error {
+ var err error
+ *d, err = ParseLocalDate(string(data))
+ return err
+}
+
+// A LocalTime represents a time with nanosecond precision.
+//
+// This type does not include location information, and therefore does not
+// describe a unique moment in time.
+//
+// This type exists to represent the TIME type in storage-based APIs like BigQuery.
+// Most operations on Times are unlikely to be meaningful. Prefer the LocalDateTime type.
+type LocalTime struct {
+ Hour int // The hour of the day in 24-hour format; range [0-23]
+ Minute int // The minute of the hour; range [0-59]
+ Second int // The second of the minute; range [0-59]
+ Nanosecond int // The nanosecond of the second; range [0-999999999]
+}
+
+// LocalTimeOf returns the LocalTime representing the time of day in which a time occurs
+// in that time's location. It ignores the date.
+func LocalTimeOf(t time.Time) LocalTime {
+ var tm LocalTime
+ tm.Hour, tm.Minute, tm.Second = t.Clock()
+ tm.Nanosecond = t.Nanosecond()
+ return tm
+}
+
+// ParseLocalTime parses a string and returns the time value it represents.
+// ParseLocalTime accepts an extended form of the RFC3339 partial-time format. After
+// the HH:MM:SS part of the string, an optional fractional part may appear,
+// consisting of a decimal point followed by one to nine decimal digits.
+// (RFC3339 admits only one digit after the decimal point).
+func ParseLocalTime(s string) (LocalTime, error) {
+ t, err := time.Parse("15:04:05.999999999", s)
+ if err != nil {
+ return LocalTime{}, err
+ }
+ return LocalTimeOf(t), nil
+}
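+
+// Illustrative sketch (not part of the upstream file): the extended fractional
+// part accepted by ParseLocalTime. The values are arbitrary.
+//
+//	t1, _ := ParseLocalTime("07:32:00")        // no fractional part
+//	t2, _ := ParseLocalTime("00:32:00.999999") // up to nine fractional digits
+//	_, _ = t1, t2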
+
+// String returns the time in the format described in ParseLocalTime. If Nanosecond
+// is zero, no fractional part will be generated. Otherwise, the result will
+// end with a fractional part consisting of a decimal point and nine digits.
+func (t LocalTime) String() string {
+ s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second)
+ if t.Nanosecond == 0 {
+ return s
+ }
+ return s + fmt.Sprintf(".%09d", t.Nanosecond)
+}
+
+// IsValid reports whether the time is valid.
+func (t LocalTime) IsValid() bool {
+ // Construct a non-zero time.
+ tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC)
+ return LocalTimeOf(tm) == t
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The output is the result of t.String().
+func (t LocalTime) MarshalText() ([]byte, error) {
+ return []byte(t.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The time is expected to be a string in a format accepted by ParseLocalTime.
+func (t *LocalTime) UnmarshalText(data []byte) error {
+ var err error
+ *t, err = ParseLocalTime(string(data))
+ return err
+}
+
+// A LocalDateTime represents a date and time.
+//
+// This type does not include location information, and therefore does not
+// describe a unique moment in time.
+type LocalDateTime struct {
+ Date LocalDate
+ Time LocalTime
+}
+
+// Note: We deliberately do not embed LocalDate into LocalDateTime, to avoid promoting AddDays and Sub.
+
+// LocalDateTimeOf returns the LocalDateTime in which a time occurs in that time's location.
+func LocalDateTimeOf(t time.Time) LocalDateTime {
+ return LocalDateTime{
+ Date: LocalDateOf(t),
+ Time: LocalTimeOf(t),
+ }
+}
+
+// ParseLocalDateTime parses a string and returns the LocalDateTime it represents.
+// ParseLocalDateTime accepts a variant of the RFC3339 date-time format that omits
+// the time offset but includes an optional fractional time, as described in
+// ParseLocalTime. Informally, the accepted format is
+// YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF]
+// where the 'T' may be a lower-case 't'.
+func ParseLocalDateTime(s string) (LocalDateTime, error) {
+ t, err := time.Parse("2006-01-02T15:04:05.999999999", s)
+ if err != nil {
+ t, err = time.Parse("2006-01-02t15:04:05.999999999", s)
+ if err != nil {
+ return LocalDateTime{}, err
+ }
+ }
+ return LocalDateTimeOf(t), nil
+}
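+
+// Illustrative sketch (not part of the upstream file): both the upper-case 'T'
+// and the lower-case 't' separator are accepted. The values are arbitrary.
+//
+//	dt1, _ := ParseLocalDateTime("1979-05-27T07:32:00")
+//	dt2, _ := ParseLocalDateTime("1979-05-27t07:32:00.999999")
+//	_, _ = dt1, dt2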
+
+// String returns the date-time in the format described in ParseLocalDateTime.
+func (dt LocalDateTime) String() string {
+ return dt.Date.String() + "T" + dt.Time.String()
+}
+
+// IsValid reports whether the datetime is valid.
+func (dt LocalDateTime) IsValid() bool {
+ return dt.Date.IsValid() && dt.Time.IsValid()
+}
+
+// In returns the time corresponding to the LocalDateTime in the given location.
+//
+// If the time is missing or ambiguous at the location, In returns the same
+// result as time.Date. For example, if loc is America/Indiana/Vincennes, then
+// both
+//     time.Date(1955, time.May, 1, 0, 30, 0, 0, loc)
+// and
+//     LocalDateTime{
+//         Date: LocalDate{Year: 1955, Month: time.May, Day: 1},
+//         Time: LocalTime{Minute: 30},
+//     }.In(loc)
+// return 23:30:00 on April 30, 1955.
+//
+// In panics if loc is nil.
+func (dt LocalDateTime) In(loc *time.Location) time.Time {
+ return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc)
+}
+
+// Before reports whether dt1 occurs before dt2.
+func (dt1 LocalDateTime) Before(dt2 LocalDateTime) bool {
+ return dt1.In(time.UTC).Before(dt2.In(time.UTC))
+}
+
+// After reports whether dt1 occurs after dt2.
+func (dt1 LocalDateTime) After(dt2 LocalDateTime) bool {
+ return dt2.Before(dt1)
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The output is the result of dt.String().
+func (dt LocalDateTime) MarshalText() ([]byte, error) {
+ return []byte(dt.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The datetime is expected to be a string in a format accepted by ParseLocalDateTime.
+func (dt *LocalDateTime) UnmarshalText(data []byte) error {
+ var err error
+ *dt, err = ParseLocalDateTime(string(data))
+ return err
+}
diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go
new file mode 100644
index 000000000..3443c3545
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/marshal.go
@@ -0,0 +1,1308 @@
+package toml
+
+import (
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+ tagFieldName = "toml"
+ tagFieldComment = "comment"
+ tagCommented = "commented"
+ tagMultiline = "multiline"
+ tagLiteral = "literal"
+ tagDefault = "default"
+)
+
+type tomlOpts struct {
+ name string
+ nameFromTag bool
+ comment string
+ commented bool
+ multiline bool
+ literal bool
+ include bool
+ omitempty bool
+ defaultValue string
+}
+
+type encOpts struct {
+ quoteMapKeys bool
+ arraysOneElementPerLine bool
+}
+
+var encOptsDefaults = encOpts{
+ quoteMapKeys: false,
+}
+
+type annotation struct {
+ tag string
+ comment string
+ commented string
+ multiline string
+ literal string
+ defaultValue string
+}
+
+var annotationDefault = annotation{
+ tag: tagFieldName,
+ comment: tagFieldComment,
+ commented: tagCommented,
+ multiline: tagMultiline,
+ literal: tagLiteral,
+ defaultValue: tagDefault,
+}
+
+// MarshalOrder controls the order in which the Encoder writes fields to the
+// output stream.
+type MarshalOrder int
+
+// Orders in which the Encoder can write fields to the output stream.
+const (
+ // Sort fields alphabetically.
+ OrderAlphabetical MarshalOrder = iota + 1
+ // Preserve the order the fields are encountered. For example, the order of fields in
+ // a struct.
+ OrderPreserve
+)
+
+var timeType = reflect.TypeOf(time.Time{})
+var marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
+var unmarshalerType = reflect.TypeOf(new(Unmarshaler)).Elem()
+var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem()
+var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem()
+var localDateType = reflect.TypeOf(LocalDate{})
+var localTimeType = reflect.TypeOf(LocalTime{})
+var localDateTimeType = reflect.TypeOf(LocalDateTime{})
+var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{})
+
+// Check if the given marshal type maps to a Tree primitive
+func isPrimitive(mtype reflect.Type) bool {
+ switch mtype.Kind() {
+ case reflect.Ptr:
+ return isPrimitive(mtype.Elem())
+ case reflect.Bool:
+ return true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return true
+ case reflect.Float32, reflect.Float64:
+ return true
+ case reflect.String:
+ return true
+ case reflect.Struct:
+ return isTimeType(mtype)
+ default:
+ return false
+ }
+}
+
+func isTimeType(mtype reflect.Type) bool {
+ return mtype == timeType || mtype == localDateType || mtype == localDateTimeType || mtype == localTimeType
+}
+
+// Check if the given marshal type maps to a Tree slice or array
+func isTreeSequence(mtype reflect.Type) bool {
+ switch mtype.Kind() {
+ case reflect.Ptr:
+ return isTreeSequence(mtype.Elem())
+ case reflect.Slice, reflect.Array:
+ return isTree(mtype.Elem())
+ default:
+ return false
+ }
+}
+
+// Check if the given marshal type maps to a slice or array of a custom marshaler type
+func isCustomMarshalerSequence(mtype reflect.Type) bool {
+ switch mtype.Kind() {
+ case reflect.Ptr:
+ return isCustomMarshalerSequence(mtype.Elem())
+ case reflect.Slice, reflect.Array:
+ return isCustomMarshaler(mtype.Elem()) || isCustomMarshaler(reflect.New(mtype.Elem()).Type())
+ default:
+ return false
+ }
+}
+
+// Check if the given marshal type maps to a slice or array of a text marshaler type
+func isTextMarshalerSequence(mtype reflect.Type) bool {
+ switch mtype.Kind() {
+ case reflect.Ptr:
+ return isTextMarshalerSequence(mtype.Elem())
+ case reflect.Slice, reflect.Array:
+ return isTextMarshaler(mtype.Elem()) || isTextMarshaler(reflect.New(mtype.Elem()).Type())
+ default:
+ return false
+ }
+}
+
+// Check if the given marshal type maps to a non-Tree slice or array
+func isOtherSequence(mtype reflect.Type) bool {
+ switch mtype.Kind() {
+ case reflect.Ptr:
+ return isOtherSequence(mtype.Elem())
+ case reflect.Slice, reflect.Array:
+ return !isTreeSequence(mtype)
+ default:
+ return false
+ }
+}
+
+// Check if the given marshal type maps to a Tree
+func isTree(mtype reflect.Type) bool {
+ switch mtype.Kind() {
+ case reflect.Ptr:
+ return isTree(mtype.Elem())
+ case reflect.Map:
+ return true
+ case reflect.Struct:
+ return !isPrimitive(mtype)
+ default:
+ return false
+ }
+}
+
+func isCustomMarshaler(mtype reflect.Type) bool {
+ return mtype.Implements(marshalerType)
+}
+
+func callCustomMarshaler(mval reflect.Value) ([]byte, error) {
+ return mval.Interface().(Marshaler).MarshalTOML()
+}
+
+func isTextMarshaler(mtype reflect.Type) bool {
+ return mtype.Implements(textMarshalerType) && !isTimeType(mtype)
+}
+
+func callTextMarshaler(mval reflect.Value) ([]byte, error) {
+ return mval.Interface().(encoding.TextMarshaler).MarshalText()
+}
+
+func isCustomUnmarshaler(mtype reflect.Type) bool {
+ return mtype.Implements(unmarshalerType)
+}
+
+func callCustomUnmarshaler(mval reflect.Value, tval interface{}) error {
+ return mval.Interface().(Unmarshaler).UnmarshalTOML(tval)
+}
+
+func isTextUnmarshaler(mtype reflect.Type) bool {
+ return mtype.Implements(textUnmarshalerType)
+}
+
+func callTextUnmarshaler(mval reflect.Value, text []byte) error {
+ return mval.Interface().(encoding.TextUnmarshaler).UnmarshalText(text)
+}
+
+// Marshaler is the interface implemented by types that
+// can marshal themselves into valid TOML.
+type Marshaler interface {
+ MarshalTOML() ([]byte, error)
+}
+
+// Unmarshaler is the interface implemented by types that
+// can unmarshal a TOML description of themselves.
+type Unmarshaler interface {
+ UnmarshalTOML(interface{}) error
+}
+
+/*
+Marshal returns the TOML encoding of v. Behavior is similar to the Go json
+encoder, except that there is no concept of a Marshaler interface or MarshalTOML
+function for sub-structs, and currently only definite types can be marshaled
+(i.e. no `interface{}`).
+
+The following struct annotations are supported:
+
+ toml:"Field" Overrides the field's name to output.
+ omitempty When set, empty values and groups are not emitted.
+ comment:"comment" Emits a # comment on the same line. This supports new lines.
+ commented:"true" Emits the value as commented.
+
+Note that pointers are automatically assigned the "omitempty" option, as TOML
+explicitly does not handle null values (saying instead the label should be
+dropped).
+
+Tree structural types and corresponding marshal types:
+
+ *Tree (*)struct, (*)map[string]interface{}
+ []*Tree (*)[](*)struct, (*)[](*)map[string]interface{}
+ []interface{} (as interface{}) (*)[]primitive, (*)[]([]interface{})
+ interface{} (*)primitive
+
+Tree primitive types and corresponding marshal types:
+
+ uint64 uint, uint8-uint64, pointers to same
+  int64      int, int8-int64, pointers to same
+ float64 float32, float64, pointers to same
+ string string, pointers to same
+ bool bool, pointers to same
+ time.LocalTime time.LocalTime{}, pointers to same
+
+For additional flexibility, use the Encoder API.
+*/
+func Marshal(v interface{}) ([]byte, error) {
+ return NewEncoder(nil).marshal(v)
+}
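+
+// Illustrative sketch (not part of the upstream file): marshalling a struct with
+// the annotations documented above. The type and field names are hypothetical.
+//
+//	type serverConfig struct {
+//		Host     string `toml:"host" comment:"address to bind to"`
+//		Port     int    `toml:"port"`
+//		Password string `toml:"-"` // excluded from the output
+//	}
+//
+//	b, err := Marshal(serverConfig{Host: "localhost", Port: 8080})
+//	// On success b contains lines such as `host = "localhost"` and `port = 8080`.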
+
+// Encoder writes TOML values to an output stream.
+type Encoder struct {
+ w io.Writer
+ encOpts
+ annotation
+ line int
+ col int
+ order MarshalOrder
+ promoteAnon bool
+ compactComments bool
+ indentation string
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ w: w,
+ encOpts: encOptsDefaults,
+ annotation: annotationDefault,
+ line: 0,
+ col: 1,
+ order: OrderAlphabetical,
+ indentation: " ",
+ }
+}
+
+// Encode writes the TOML encoding of v to the stream.
+//
+// See the documentation for Marshal for details.
+func (e *Encoder) Encode(v interface{}) error {
+ b, err := e.marshal(v)
+ if err != nil {
+ return err
+ }
+ if _, err := e.w.Write(b); err != nil {
+ return err
+ }
+ return nil
+}
+
+// QuoteMapKeys sets up the encoder to quote the TOML keys produced from
+// maps with string-typed keys.
+//
+// This relieves the character limitations on map keys.
+func (e *Encoder) QuoteMapKeys(v bool) *Encoder {
+ e.quoteMapKeys = v
+ return e
+}
+
+// ArraysWithOneElementPerLine sets up the encoder to write arrays that have
+// more than one element with one element per line, instead of on a single line.
+//
+// For example:
+//
+// A = [1,2,3]
+//
+// Becomes
+//
+// A = [
+// 1,
+// 2,
+// 3,
+// ]
+func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder {
+ e.arraysOneElementPerLine = v
+ return e
+}
+
+// Order allows changing the order in which fields are written to the output stream.
+func (e *Encoder) Order(ord MarshalOrder) *Encoder {
+ e.order = ord
+ return e
+}
+
+// Indentation allows changing the indentation used when marshalling.
+func (e *Encoder) Indentation(indent string) *Encoder {
+ e.indentation = indent
+ return e
+}
+
+// SetTagName allows changing default tag "toml"
+func (e *Encoder) SetTagName(v string) *Encoder {
+ e.tag = v
+ return e
+}
+
+// SetTagComment allows changing default tag "comment"
+func (e *Encoder) SetTagComment(v string) *Encoder {
+ e.comment = v
+ return e
+}
+
+// SetTagCommented allows changing default tag "commented"
+func (e *Encoder) SetTagCommented(v string) *Encoder {
+ e.commented = v
+ return e
+}
+
+// SetTagMultiline allows changing default tag "multiline"
+func (e *Encoder) SetTagMultiline(v string) *Encoder {
+ e.multiline = v
+ return e
+}
+
+// PromoteAnonymous allows changing how anonymous struct fields are marshaled.
+// Usually, they are marshaled as if the inner exported fields were fields in
+// the outer struct. However, if an anonymous struct field is given a name in
+// its TOML tag, it is treated like a regular struct field with that name,
+// rather than being anonymous.
+//
+// In case anonymous promotion is enabled, all anonymous structs are promoted
+// and treated like regular struct fields.
+func (e *Encoder) PromoteAnonymous(promote bool) *Encoder {
+ e.promoteAnon = promote
+ return e
+}
+
+// CompactComments removes the newline before each comment in the tree.
+func (e *Encoder) CompactComments(cc bool) *Encoder {
+ e.compactComments = cc
+ return e
+}
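+
+// Illustrative sketch (not part of the upstream file): the option setters above
+// return the Encoder, so they can be chained before calling Encode. The value
+// being encoded (cfg) is hypothetical.
+//
+//	var buf bytes.Buffer
+//	err := NewEncoder(&buf).
+//		Order(OrderPreserve).
+//		Indentation("  ").
+//		ArraysWithOneElementPerLine(true).
+//		Encode(cfg)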
+
+func (e *Encoder) marshal(v interface{}) ([]byte, error) {
+ // Check if indentation is valid
+ for _, char := range e.indentation {
+ if !isSpace(char) {
+			return []byte{}, fmt.Errorf("invalid indentation: must only contain space or tab characters")
+ }
+ }
+
+ mtype := reflect.TypeOf(v)
+ if mtype == nil {
+ return []byte{}, errors.New("nil cannot be marshaled to TOML")
+ }
+
+ switch mtype.Kind() {
+ case reflect.Struct, reflect.Map:
+ case reflect.Ptr:
+ if mtype.Elem().Kind() != reflect.Struct {
+ return []byte{}, errors.New("Only pointer to struct can be marshaled to TOML")
+ }
+ if reflect.ValueOf(v).IsNil() {
+ return []byte{}, errors.New("nil pointer cannot be marshaled to TOML")
+ }
+ default:
+ return []byte{}, errors.New("Only a struct or map can be marshaled to TOML")
+ }
+
+ sval := reflect.ValueOf(v)
+ if isCustomMarshaler(mtype) {
+ return callCustomMarshaler(sval)
+ }
+ if isTextMarshaler(mtype) {
+ return callTextMarshaler(sval)
+ }
+ t, err := e.valueToTree(mtype, sval)
+ if err != nil {
+ return []byte{}, err
+ }
+
+ var buf bytes.Buffer
+ _, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order, e.indentation, e.compactComments, false)
+
+ return buf.Bytes(), err
+}
+
+// Create next tree with a position based on Encoder.line
+func (e *Encoder) nextTree() *Tree {
+ return newTreeWithPosition(Position{Line: e.line, Col: 1})
+}
+
+// Convert given marshal struct or map value to toml tree
+func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) {
+ if mtype.Kind() == reflect.Ptr {
+ return e.valueToTree(mtype.Elem(), mval.Elem())
+ }
+ tval := e.nextTree()
+ switch mtype.Kind() {
+ case reflect.Struct:
+ switch mval.Interface().(type) {
+ case Tree:
+ reflect.ValueOf(tval).Elem().Set(mval)
+ default:
+ for i := 0; i < mtype.NumField(); i++ {
+ mtypef, mvalf := mtype.Field(i), mval.Field(i)
+ opts := tomlOptions(mtypef, e.annotation)
+ if opts.include && ((mtypef.Type.Kind() != reflect.Interface && !opts.omitempty) || !isZero(mvalf)) {
+ val, err := e.valueToToml(mtypef.Type, mvalf)
+ if err != nil {
+ return nil, err
+ }
+ if tree, ok := val.(*Tree); ok && mtypef.Anonymous && !opts.nameFromTag && !e.promoteAnon {
+ e.appendTree(tval, tree)
+ } else {
+ val = e.wrapTomlValue(val, tval)
+ tval.SetPathWithOptions([]string{opts.name}, SetOptions{
+ Comment: opts.comment,
+ Commented: opts.commented,
+ Multiline: opts.multiline,
+ Literal: opts.literal,
+ }, val)
+ }
+ }
+ }
+ }
+ case reflect.Map:
+ keys := mval.MapKeys()
+ if e.order == OrderPreserve && len(keys) > 0 {
+			// Sorting []reflect.Value is not straightforward.
+			//
+			// OrderPreserve only guarantees deterministic results when strings
+			// are used as map keys.
+ typ := keys[0].Type()
+ kind := keys[0].Kind()
+ if kind == reflect.String {
+ ikeys := make([]string, len(keys))
+ for i := range keys {
+ ikeys[i] = keys[i].Interface().(string)
+ }
+ sort.Strings(ikeys)
+ for i := range ikeys {
+ keys[i] = reflect.ValueOf(ikeys[i]).Convert(typ)
+ }
+ }
+ }
+ for _, key := range keys {
+ mvalf := mval.MapIndex(key)
+ if (mtype.Elem().Kind() == reflect.Ptr || mtype.Elem().Kind() == reflect.Interface) && mvalf.IsNil() {
+ continue
+ }
+ val, err := e.valueToToml(mtype.Elem(), mvalf)
+ if err != nil {
+ return nil, err
+ }
+ val = e.wrapTomlValue(val, tval)
+ if e.quoteMapKeys {
+ keyStr, err := tomlValueStringRepresentation(key.String(), "", "", e.order, e.arraysOneElementPerLine)
+ if err != nil {
+ return nil, err
+ }
+ tval.SetPath([]string{keyStr}, val)
+ } else {
+ tval.SetPath([]string{key.String()}, val)
+ }
+ }
+ }
+ return tval, nil
+}
+
+// Convert given marshal slice to slice of Toml trees
+func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) {
+ tval := make([]*Tree, mval.Len(), mval.Len())
+ for i := 0; i < mval.Len(); i++ {
+ val, err := e.valueToTree(mtype.Elem(), mval.Index(i))
+ if err != nil {
+ return nil, err
+ }
+ tval[i] = val
+ }
+ return tval, nil
+}
+
+// Convert given marshal slice to slice of toml values
+func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
+ tval := make([]interface{}, mval.Len(), mval.Len())
+ for i := 0; i < mval.Len(); i++ {
+ val, err := e.valueToToml(mtype.Elem(), mval.Index(i))
+ if err != nil {
+ return nil, err
+ }
+ tval[i] = val
+ }
+ return tval, nil
+}
+
+// Convert given marshal value to toml value
+func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
+ if mtype.Kind() == reflect.Ptr {
+ switch {
+ case isCustomMarshaler(mtype):
+ return callCustomMarshaler(mval)
+ case isTextMarshaler(mtype):
+ b, err := callTextMarshaler(mval)
+ return string(b), err
+ default:
+ return e.valueToToml(mtype.Elem(), mval.Elem())
+ }
+ }
+ if mtype.Kind() == reflect.Interface {
+ return e.valueToToml(mval.Elem().Type(), mval.Elem())
+ }
+ switch {
+ case isCustomMarshaler(mtype):
+ return callCustomMarshaler(mval)
+ case isTextMarshaler(mtype):
+ b, err := callTextMarshaler(mval)
+ return string(b), err
+ case isTree(mtype):
+ return e.valueToTree(mtype, mval)
+ case isOtherSequence(mtype), isCustomMarshalerSequence(mtype), isTextMarshalerSequence(mtype):
+ return e.valueToOtherSlice(mtype, mval)
+ case isTreeSequence(mtype):
+ return e.valueToTreeSlice(mtype, mval)
+ default:
+ switch mtype.Kind() {
+ case reflect.Bool:
+ return mval.Bool(), nil
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) {
+ return fmt.Sprint(mval), nil
+ }
+ return mval.Int(), nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return mval.Uint(), nil
+ case reflect.Float32, reflect.Float64:
+ return mval.Float(), nil
+ case reflect.String:
+ return mval.String(), nil
+ case reflect.Struct:
+ return mval.Interface(), nil
+ default:
+ return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind())
+ }
+ }
+}
+
+func (e *Encoder) appendTree(t, o *Tree) error {
+ for key, value := range o.values {
+ if _, ok := t.values[key]; ok {
+ continue
+ }
+ if tomlValue, ok := value.(*tomlValue); ok {
+ tomlValue.position.Col = t.position.Col
+ }
+ t.values[key] = value
+ }
+ return nil
+}
+
+// Create a toml value with the current line number as the position line
+func (e *Encoder) wrapTomlValue(val interface{}, parent *Tree) interface{} {
+ _, isTree := val.(*Tree)
+ _, isTreeS := val.([]*Tree)
+ if isTree || isTreeS {
+ e.line++
+ return val
+ }
+
+ ret := &tomlValue{
+ value: val,
+ position: Position{
+ e.line,
+ parent.position.Col,
+ },
+ }
+ e.line++
+ return ret
+}
+
+// Unmarshal attempts to unmarshal the Tree into a Go struct pointed to by v.
+// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for
+// sub-structs, and only definite types can be unmarshaled.
+func (t *Tree) Unmarshal(v interface{}) error {
+ d := Decoder{tval: t, tagName: tagFieldName}
+ return d.unmarshal(v)
+}
+
+// Marshal returns the TOML encoding of the Tree.
+// See the Marshal() documentation for the types mapping table.
+func (t *Tree) Marshal() ([]byte, error) {
+ var buf bytes.Buffer
+ _, err := t.WriteTo(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// Unmarshal parses the TOML-encoded data and stores the result in the value
+// pointed to by v. Behavior is similar to the Go json decoder, except that there
+// is no concept of an Unmarshaler interface or UnmarshalTOML function for
+// sub-structs, and currently only definite types can be unmarshaled to (i.e. no
+// `interface{}`).
+//
+// The following struct annotations are supported:
+//
+// toml:"Field" Overrides the field's name to map to.
+// default:"foo" Provides a default value.
+//
+// For default values, only fields of the following types are supported:
+//  * string
+//  * bool
+//  * int, int8, int16, int32, int64 (including time.Duration)
+//  * uint, uint8, uint16, uint32, uint64
+//  * float32, float64
+//
+// See the Marshal() documentation for the types mapping table.
+func Unmarshal(data []byte, v interface{}) error {
+ t, err := LoadReader(bytes.NewReader(data))
+ if err != nil {
+ return err
+ }
+ return t.Unmarshal(v)
+}
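+
+// Illustrative sketch (not part of the upstream file): unmarshalling with the
+// `toml` and `default` annotations documented above. The type and input are
+// hypothetical.
+//
+//	type mediaConfig struct {
+//		Backend string `toml:"backend" default:"local"`
+//		MaxSize int64  `toml:"max-size" default:"10485760"`
+//	}
+//
+//	var m mediaConfig
+//	err := Unmarshal([]byte(`backend = "s3"`), &m)
+//	// On success m.Backend is "s3" and m.MaxSize falls back to 10485760.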
+
+// Decoder reads and decodes TOML values from an input stream.
+type Decoder struct {
+ r io.Reader
+ tval *Tree
+ encOpts
+ tagName string
+ strict bool
+ visitor visitorState
+}
+
+// NewDecoder returns a new decoder that reads from r.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ r: r,
+ encOpts: encOptsDefaults,
+ tagName: tagFieldName,
+ }
+}
+
+// Decode reads a TOML-encoded value from its input
+// and unmarshals it into the value pointed to by v.
+//
+// See the documentation for Marshal for details.
+func (d *Decoder) Decode(v interface{}) error {
+ var err error
+ d.tval, err = LoadReader(d.r)
+ if err != nil {
+ return err
+ }
+ return d.unmarshal(v)
+}
+
+// SetTagName allows changing default tag "toml"
+func (d *Decoder) SetTagName(v string) *Decoder {
+ d.tagName = v
+ return d
+}
+
+// Strict enables or disables strict decoding. When enabled, any field found in
+// the input data that does not have a corresponding struct member causes an error.
+func (d *Decoder) Strict(strict bool) *Decoder {
+ d.strict = strict
+ return d
+}
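+
+// Illustrative sketch (not part of the upstream file): strict decoding rejects
+// keys that have no matching struct field. The type and input are hypothetical.
+//
+//	type onlyHost struct {
+//		Host string `toml:"host"`
+//	}
+//
+//	var c onlyHost
+//	d := NewDecoder(strings.NewReader("host = \"localhost\"\nport = 8080\n"))
+//	err := d.Strict(true).Decode(&c)
+//	// err is non-nil because "port" has no corresponding struct member.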
+
+func (d *Decoder) unmarshal(v interface{}) error {
+ mtype := reflect.TypeOf(v)
+ if mtype == nil {
+ return errors.New("nil cannot be unmarshaled from TOML")
+ }
+ if mtype.Kind() != reflect.Ptr {
+ return errors.New("only a pointer to struct or map can be unmarshaled from TOML")
+ }
+
+ elem := mtype.Elem()
+
+ switch elem.Kind() {
+ case reflect.Struct, reflect.Map:
+ case reflect.Interface:
+ elem = mapStringInterfaceType
+ default:
+ return errors.New("only a pointer to struct or map can be unmarshaled from TOML")
+ }
+
+ if reflect.ValueOf(v).IsNil() {
+ return errors.New("nil pointer cannot be unmarshaled from TOML")
+ }
+
+ vv := reflect.ValueOf(v).Elem()
+
+ if d.strict {
+ d.visitor = newVisitorState(d.tval)
+ }
+
+ sval, err := d.valueFromTree(elem, d.tval, &vv)
+ if err != nil {
+ return err
+ }
+ if err := d.visitor.validate(); err != nil {
+ return err
+ }
+ reflect.ValueOf(v).Elem().Set(sval)
+ return nil
+}
+
+// Convert toml tree to marshal struct or map, using marshal type. When mval1
+// is non-nil, merge fields into the given value instead of allocating a new one.
+func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.Value) (reflect.Value, error) {
+ if mtype.Kind() == reflect.Ptr {
+ return d.unwrapPointer(mtype, tval, mval1)
+ }
+
+ // Check if pointer to value implements the Unmarshaler interface.
+ if mvalPtr := reflect.New(mtype); isCustomUnmarshaler(mvalPtr.Type()) {
+ d.visitor.visitAll()
+
+ if tval == nil {
+ return mvalPtr.Elem(), nil
+ }
+
+ if err := callCustomUnmarshaler(mvalPtr, tval.ToMap()); err != nil {
+ return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err)
+ }
+ return mvalPtr.Elem(), nil
+ }
+
+ var mval reflect.Value
+ switch mtype.Kind() {
+ case reflect.Struct:
+ if mval1 != nil {
+ mval = *mval1
+ } else {
+ mval = reflect.New(mtype).Elem()
+ }
+
+ switch mval.Interface().(type) {
+ case Tree:
+ mval.Set(reflect.ValueOf(tval).Elem())
+ default:
+ for i := 0; i < mtype.NumField(); i++ {
+ mtypef := mtype.Field(i)
+ an := annotation{tag: d.tagName}
+ opts := tomlOptions(mtypef, an)
+ if !opts.include {
+ continue
+ }
+ baseKey := opts.name
+ keysToTry := []string{
+ baseKey,
+ strings.ToLower(baseKey),
+ strings.ToTitle(baseKey),
+ strings.ToLower(string(baseKey[0])) + baseKey[1:],
+ }
+
+ found := false
+ if tval != nil {
+ for _, key := range keysToTry {
+ exists := tval.HasPath([]string{key})
+ if !exists {
+ continue
+ }
+
+ d.visitor.push(key)
+ val := tval.GetPath([]string{key})
+ fval := mval.Field(i)
+ mvalf, err := d.valueFromToml(mtypef.Type, val, &fval)
+ if err != nil {
+ return mval, formatError(err, tval.GetPositionPath([]string{key}))
+ }
+ mval.Field(i).Set(mvalf)
+ found = true
+ d.visitor.pop()
+ break
+ }
+ }
+
+ if !found && opts.defaultValue != "" {
+ mvalf := mval.Field(i)
+ var val interface{}
+ var err error
+ switch mvalf.Kind() {
+ case reflect.String:
+ val = opts.defaultValue
+ case reflect.Bool:
+ val, err = strconv.ParseBool(opts.defaultValue)
+ case reflect.Uint:
+ val, err = strconv.ParseUint(opts.defaultValue, 10, 0)
+ case reflect.Uint8:
+ val, err = strconv.ParseUint(opts.defaultValue, 10, 8)
+ case reflect.Uint16:
+ val, err = strconv.ParseUint(opts.defaultValue, 10, 16)
+ case reflect.Uint32:
+ val, err = strconv.ParseUint(opts.defaultValue, 10, 32)
+ case reflect.Uint64:
+ val, err = strconv.ParseUint(opts.defaultValue, 10, 64)
+ case reflect.Int:
+ val, err = strconv.ParseInt(opts.defaultValue, 10, 0)
+ case reflect.Int8:
+ val, err = strconv.ParseInt(opts.defaultValue, 10, 8)
+ case reflect.Int16:
+ val, err = strconv.ParseInt(opts.defaultValue, 10, 16)
+ case reflect.Int32:
+ val, err = strconv.ParseInt(opts.defaultValue, 10, 32)
+ case reflect.Int64:
+ // Check if the provided number has a non-numeric extension.
+ var hasExtension bool
+ if len(opts.defaultValue) > 0 {
+ lastChar := opts.defaultValue[len(opts.defaultValue)-1]
+ if lastChar < '0' || lastChar > '9' {
+ hasExtension = true
+ }
+ }
+ // If the value is a time.Duration with extension, parse as duration.
+ // If the value is an int64 or a time.Duration without extension, parse as number.
+ if hasExtension && mvalf.Type().String() == "time.Duration" {
+ val, err = time.ParseDuration(opts.defaultValue)
+ } else {
+ val, err = strconv.ParseInt(opts.defaultValue, 10, 64)
+ }
+ case reflect.Float32:
+ val, err = strconv.ParseFloat(opts.defaultValue, 32)
+ case reflect.Float64:
+ val, err = strconv.ParseFloat(opts.defaultValue, 64)
+ default:
+ return mvalf, fmt.Errorf("unsupported field type for default option")
+ }
+
+ if err != nil {
+ return mvalf, err
+ }
+ mvalf.Set(reflect.ValueOf(val).Convert(mvalf.Type()))
+ }
+
+ // save the old behavior above and try to check structs
+ if !found && opts.defaultValue == "" && mtypef.Type.Kind() == reflect.Struct {
+ tmpTval := tval
+ if !mtypef.Anonymous {
+ tmpTval = nil
+ }
+ fval := mval.Field(i)
+ v, err := d.valueFromTree(mtypef.Type, tmpTval, &fval)
+ if err != nil {
+ return v, err
+ }
+ mval.Field(i).Set(v)
+ }
+ }
+ }
+ case reflect.Map:
+ mval = reflect.MakeMap(mtype)
+ for _, key := range tval.Keys() {
+ d.visitor.push(key)
+ // TODO: path splits key
+ val := tval.GetPath([]string{key})
+ mvalf, err := d.valueFromToml(mtype.Elem(), val, nil)
+ if err != nil {
+ return mval, formatError(err, tval.GetPositionPath([]string{key}))
+ }
+ mval.SetMapIndex(reflect.ValueOf(key).Convert(mtype.Key()), mvalf)
+ d.visitor.pop()
+ }
+ }
+ return mval, nil
+}
+
+// Convert toml value to marshal struct/map slice, using marshal type
+func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) {
+ mval, err := makeSliceOrArray(mtype, len(tval))
+ if err != nil {
+ return mval, err
+ }
+
+ for i := 0; i < len(tval); i++ {
+ d.visitor.push(strconv.Itoa(i))
+ val, err := d.valueFromTree(mtype.Elem(), tval[i], nil)
+ if err != nil {
+ return mval, err
+ }
+ mval.Index(i).Set(val)
+ d.visitor.pop()
+ }
+ return mval, nil
+}
+
+// Convert toml value to marshal primitive slice, using marshal type
+func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) {
+ mval, err := makeSliceOrArray(mtype, len(tval))
+ if err != nil {
+ return mval, err
+ }
+
+ for i := 0; i < len(tval); i++ {
+ val, err := d.valueFromToml(mtype.Elem(), tval[i], nil)
+ if err != nil {
+ return mval, err
+ }
+ mval.Index(i).Set(val)
+ }
+ return mval, nil
+}
+
+// Convert toml value to marshal primitive slice, using marshal type
+func (d *Decoder) valueFromOtherSliceI(mtype reflect.Type, tval interface{}) (reflect.Value, error) {
+ val := reflect.ValueOf(tval)
+ length := val.Len()
+
+ mval, err := makeSliceOrArray(mtype, length)
+ if err != nil {
+ return mval, err
+ }
+
+ for i := 0; i < length; i++ {
+ val, err := d.valueFromToml(mtype.Elem(), val.Index(i).Interface(), nil)
+ if err != nil {
+ return mval, err
+ }
+ mval.Index(i).Set(val)
+ }
+ return mval, nil
+}
+
+// Create a new slice or a new array with specified length
+func makeSliceOrArray(mtype reflect.Type, tLength int) (reflect.Value, error) {
+ var mval reflect.Value
+ switch mtype.Kind() {
+ case reflect.Slice:
+ mval = reflect.MakeSlice(mtype, tLength, tLength)
+ case reflect.Array:
+ mval = reflect.New(reflect.ArrayOf(mtype.Len(), mtype.Elem())).Elem()
+ if tLength > mtype.Len() {
+ return mval, fmt.Errorf("unmarshal: TOML array length (%v) exceeds destination array length (%v)", tLength, mtype.Len())
+ }
+ }
+ return mval, nil
+}
+
+// Convert toml value to marshal value, using marshal type. When mval1 is non-nil
+// and the given type is a struct value, merge fields into it.
+func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) {
+ if mtype.Kind() == reflect.Ptr {
+ return d.unwrapPointer(mtype, tval, mval1)
+ }
+
+ switch t := tval.(type) {
+ case *Tree:
+ var mval11 *reflect.Value
+ if mtype.Kind() == reflect.Struct {
+ mval11 = mval1
+ }
+
+ if isTree(mtype) {
+ return d.valueFromTree(mtype, t, mval11)
+ }
+
+ if mtype.Kind() == reflect.Interface {
+ if mval1 == nil || mval1.IsNil() {
+ return d.valueFromTree(reflect.TypeOf(map[string]interface{}{}), t, nil)
+ } else {
+ return d.valueFromToml(mval1.Elem().Type(), t, nil)
+ }
+ }
+
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval)
+ case []*Tree:
+ if isTreeSequence(mtype) {
+ return d.valueFromTreeSlice(mtype, t)
+ }
+ if mtype.Kind() == reflect.Interface {
+ if mval1 == nil || mval1.IsNil() {
+ return d.valueFromTreeSlice(reflect.TypeOf([]map[string]interface{}{}), t)
+ } else {
+ ival := mval1.Elem()
+ return d.valueFromToml(mval1.Elem().Type(), t, &ival)
+ }
+ }
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval)
+ case []interface{}:
+ d.visitor.visit()
+ if isOtherSequence(mtype) {
+ return d.valueFromOtherSlice(mtype, t)
+ }
+ if mtype.Kind() == reflect.Interface {
+ if mval1 == nil || mval1.IsNil() {
+ return d.valueFromOtherSlice(reflect.TypeOf([]interface{}{}), t)
+ } else {
+ ival := mval1.Elem()
+ return d.valueFromToml(mval1.Elem().Type(), t, &ival)
+ }
+ }
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval)
+ default:
+ d.visitor.visit()
+ mvalPtr := reflect.New(mtype)
+
+ // Check if pointer to value implements the Unmarshaler interface.
+ if isCustomUnmarshaler(mvalPtr.Type()) {
+ if err := callCustomUnmarshaler(mvalPtr, tval); err != nil {
+ return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err)
+ }
+ return mvalPtr.Elem(), nil
+ }
+
+ // Check if pointer to value implements the encoding.TextUnmarshaler.
+ if isTextUnmarshaler(mvalPtr.Type()) && !isTimeType(mtype) {
+ if err := d.unmarshalText(tval, mvalPtr); err != nil {
+ return reflect.ValueOf(nil), fmt.Errorf("unmarshal text: %v", err)
+ }
+ return mvalPtr.Elem(), nil
+ }
+
+ switch mtype.Kind() {
+ case reflect.Bool, reflect.Struct:
+ val := reflect.ValueOf(tval)
+
+ switch val.Type() {
+ case localDateType:
+ localDate := val.Interface().(LocalDate)
+ switch mtype {
+ case timeType:
+ return reflect.ValueOf(time.Date(localDate.Year, localDate.Month, localDate.Day, 0, 0, 0, 0, time.Local)), nil
+ }
+ case localDateTimeType:
+ localDateTime := val.Interface().(LocalDateTime)
+ switch mtype {
+ case timeType:
+ return reflect.ValueOf(time.Date(
+ localDateTime.Date.Year,
+ localDateTime.Date.Month,
+ localDateTime.Date.Day,
+ localDateTime.Time.Hour,
+ localDateTime.Time.Minute,
+ localDateTime.Time.Second,
+ localDateTime.Time.Nanosecond,
+ time.Local)), nil
+ }
+ }
+
+			// when mtype is reflect.Struct, this conversion only succeeds if tval is a LocalTime
+ if !val.Type().ConvertibleTo(mtype) {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
+ }
+
+ return val.Convert(mtype), nil
+ case reflect.String:
+ val := reflect.ValueOf(tval)
+			// Note: int64 is convertible to string in Go, so special-case this.
+ if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
+ }
+
+ return val.Convert(mtype), nil
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ val := reflect.ValueOf(tval)
+ if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) && val.Kind() == reflect.String {
+ d, err := time.ParseDuration(val.String())
+ if err != nil {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v. %s", tval, tval, mtype.String(), err)
+ }
+ return reflect.ValueOf(d), nil
+ }
+ if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
+ }
+ if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Convert(reflect.TypeOf(int64(0))).Int()) {
+ return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String())
+ }
+
+ return val.Convert(mtype), nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ val := reflect.ValueOf(tval)
+ if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
+ }
+
+ if val.Convert(reflect.TypeOf(int(1))).Int() < 0 {
+ return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String())
+ }
+ if reflect.Indirect(reflect.New(mtype)).OverflowUint(val.Convert(reflect.TypeOf(uint64(0))).Uint()) {
+ return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String())
+ }
+
+ return val.Convert(mtype), nil
+ case reflect.Float32, reflect.Float64:
+ val := reflect.ValueOf(tval)
+ if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
+ }
+ if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Convert(reflect.TypeOf(float64(0))).Float()) {
+ return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String())
+ }
+
+ return val.Convert(mtype), nil
+ case reflect.Interface:
+ if mval1 == nil || mval1.IsNil() {
+ return reflect.ValueOf(tval), nil
+ } else {
+ ival := mval1.Elem()
+ return d.valueFromToml(mval1.Elem().Type(), t, &ival)
+ }
+ case reflect.Slice, reflect.Array:
+ if isOtherSequence(mtype) && isOtherSequence(reflect.TypeOf(t)) {
+ return d.valueFromOtherSliceI(mtype, t)
+ }
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind())
+ default:
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind())
+ }
+ }
+}
+
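+// unwrapPointer decodes tval into a freshly allocated value of the pointed-to type and returns a pointer to it.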
+func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) {
+ var melem *reflect.Value
+
+ if mval1 != nil && !mval1.IsNil() && (mtype.Elem().Kind() == reflect.Struct || mtype.Elem().Kind() == reflect.Interface) {
+ elem := mval1.Elem()
+ melem = &elem
+ }
+
+ val, err := d.valueFromToml(mtype.Elem(), tval, melem)
+ if err != nil {
+ return reflect.ValueOf(nil), err
+ }
+ mval := reflect.New(mtype.Elem())
+ mval.Elem().Set(val)
+ return mval, nil
+}
+
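+// unmarshalText renders tval as text and hands the bytes to the target's encoding.TextUnmarshaler.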
+func (d *Decoder) unmarshalText(tval interface{}, mval reflect.Value) error {
+ var buf bytes.Buffer
+ fmt.Fprint(&buf, tval)
+ return callTextUnmarshaler(mval, buf.Bytes())
+}
+
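+// tomlOptions derives the marshalling options for a struct field from its tags
+// (name, comment, commented, multiline, literal, default, omitempty); unexported fields are excluded.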
+func tomlOptions(vf reflect.StructField, an annotation) tomlOpts {
+ tag := vf.Tag.Get(an.tag)
+ parse := strings.Split(tag, ",")
+ var comment string
+ if c := vf.Tag.Get(an.comment); c != "" {
+ comment = c
+ }
+ commented, _ := strconv.ParseBool(vf.Tag.Get(an.commented))
+ multiline, _ := strconv.ParseBool(vf.Tag.Get(an.multiline))
+ literal, _ := strconv.ParseBool(vf.Tag.Get(an.literal))
+ defaultValue := vf.Tag.Get(tagDefault)
+ result := tomlOpts{
+ name: vf.Name,
+ nameFromTag: false,
+ comment: comment,
+ commented: commented,
+ multiline: multiline,
+ literal: literal,
+ include: true,
+ omitempty: false,
+ defaultValue: defaultValue,
+ }
+ if parse[0] != "" {
+ if parse[0] == "-" && len(parse) == 1 {
+ result.include = false
+ } else {
+ result.name = strings.Trim(parse[0], " ")
+ result.nameFromTag = true
+ }
+ }
+ if vf.PkgPath != "" {
+ result.include = false
+ }
+ if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" {
+ result.omitempty = true
+ }
+ if vf.Type.Kind() == reflect.Ptr {
+ result.omitempty = true
+ }
+ return result
+}
+
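+// isZero reports whether val is the zero value of its type; slices, arrays and maps count as zero when empty.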
+func isZero(val reflect.Value) bool {
+ switch val.Type().Kind() {
+ case reflect.Slice, reflect.Array, reflect.Map:
+ return val.Len() == 0
+ default:
+ return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface())
+ }
+}
+
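+// formatError prefixes err with the given position unless the error already carries one.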
+func formatError(err error, pos Position) error {
+ if err.Error()[0] == '(' { // Error already contains position information
+ return err
+ }
+ return fmt.Errorf("%s: %s", pos, err)
+}
+
+// visitorState keeps track of which keys were unmarshaled.
+type visitorState struct {
+ tree *Tree
+ path []string
+ keys map[string]struct{}
+ active bool
+}
+
+func newVisitorState(tree *Tree) visitorState {
+ path, result := []string{}, map[string]struct{}{}
+ insertKeys(path, result, tree)
+ return visitorState{
+ tree: tree,
+ path: path[:0],
+ keys: result,
+ active: true,
+ }
+}
+
+func (s *visitorState) push(key string) {
+ if s.active {
+ s.path = append(s.path, key)
+ }
+}
+
+func (s *visitorState) pop() {
+ if s.active {
+ s.path = s.path[:len(s.path)-1]
+ }
+}
+
+func (s *visitorState) visit() {
+ if s.active {
+ delete(s.keys, strings.Join(s.path, "."))
+ }
+}
+
+func (s *visitorState) visitAll() {
+ if s.active {
+ for k := range s.keys {
+ if strings.HasPrefix(k, strings.Join(s.path, ".")) {
+ delete(s.keys, k)
+ }
+ }
+ }
+}
+
+func (s *visitorState) validate() error {
+ if !s.active {
+ return nil
+ }
+ undecoded := make([]string, 0, len(s.keys))
+ for key := range s.keys {
+ undecoded = append(undecoded, key)
+ }
+ sort.Strings(undecoded)
+ if len(undecoded) > 0 {
+ return fmt.Errorf("undecoded keys: %q", undecoded)
+ }
+ return nil
+}
+
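+// insertKeys records the dotted path of every leaf value in the tree into m,
+// so that visitorState can report keys that were never decoded.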
+func insertKeys(path []string, m map[string]struct{}, tree *Tree) {
+ for k, v := range tree.values {
+ switch node := v.(type) {
+ case []*Tree:
+ for i, item := range node {
+ insertKeys(append(path, k, strconv.Itoa(i)), m, item)
+ }
+ case *Tree:
+ insertKeys(append(path, k), m, node)
+ case *tomlValue:
+ m[strings.Join(append(path, k), ".")] = struct{}{}
+ }
+ }
+}
diff --git a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml
new file mode 100644
index 000000000..792b72ed7
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml
@@ -0,0 +1,39 @@
+title = "TOML Marshal Testing"
+
+[basic_lists]
+ floats = [12.3,45.6,78.9]
+ bools = [true,false,true]
+ dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z]
+ ints = [8001,8001,8002]
+ uints = [5002,5003]
+ strings = ["One","Two","Three"]
+
+[[subdocptrs]]
+ name = "Second"
+
+[basic_map]
+ one = "one"
+ two = "two"
+
+[subdoc]
+
+ [subdoc.second]
+ name = "Second"
+
+ [subdoc.first]
+ name = "First"
+
+[basic]
+ uint = 5001
+ bool = true
+ float = 123.4
+ float64 = 123.456782132399
+ int = 5000
+ string = "Bite me"
+ date = 1979-05-27T07:32:00Z
+
+[[subdoclist]]
+ name = "List.First"
+
+[[subdoclist]]
+ name = "List.Second"
diff --git a/vendor/github.com/pelletier/go-toml/marshal_test.toml b/vendor/github.com/pelletier/go-toml/marshal_test.toml
new file mode 100644
index 000000000..ba5e110bf
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/marshal_test.toml
@@ -0,0 +1,39 @@
+title = "TOML Marshal Testing"
+
+[basic]
+ bool = true
+ date = 1979-05-27T07:32:00Z
+ float = 123.4
+ float64 = 123.456782132399
+ int = 5000
+ string = "Bite me"
+ uint = 5001
+
+[basic_lists]
+ bools = [true,false,true]
+ dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z]
+ floats = [12.3,45.6,78.9]
+ ints = [8001,8001,8002]
+ strings = ["One","Two","Three"]
+ uints = [5002,5003]
+
+[basic_map]
+ one = "one"
+ two = "two"
+
+[subdoc]
+
+ [subdoc.first]
+ name = "First"
+
+ [subdoc.second]
+ name = "Second"
+
+[[subdoclist]]
+ name = "List.First"
+
+[[subdoclist]]
+ name = "List.Second"
+
+[[subdocptrs]]
+ name = "Second"
diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go
new file mode 100644
index 000000000..f5e1a44fb
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/parser.go
@@ -0,0 +1,508 @@
+// TOML Parser.
+
+package toml
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
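+// tomlParser consumes the token stream produced by the lexer and builds a Tree.
+// flowIdx points at the next token to read; currentTable and seenTableKeys keep
+// track of the current table context and of already-defined tables.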
+type tomlParser struct {
+ flowIdx int
+ flow []token
+ tree *Tree
+ currentTable []string
+ seenTableKeys []string
+}
+
+type tomlParserStateFn func() tomlParserStateFn
+
+// raiseError formats an error message from the given token and panics with it.
+func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) {
+ panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...))
+}
+
+func (p *tomlParser) run() {
+ for state := p.parseStart; state != nil; {
+ state = state()
+ }
+}
+
+func (p *tomlParser) peek() *token {
+ if p.flowIdx >= len(p.flow) {
+ return nil
+ }
+ return &p.flow[p.flowIdx]
+}
+
+func (p *tomlParser) assume(typ tokenType) {
+ tok := p.getToken()
+ if tok == nil {
+ p.raiseError(tok, "was expecting token %s, but token stream is empty", tok)
+ }
+ if tok.typ != typ {
+ p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok)
+ }
+}
+
+func (p *tomlParser) getToken() *token {
+ tok := p.peek()
+ if tok == nil {
+ return nil
+ }
+ p.flowIdx++
+ return tok
+}
+
+func (p *tomlParser) parseStart() tomlParserStateFn {
+ tok := p.peek()
+
+ // end of stream, parsing is finished
+ if tok == nil {
+ return nil
+ }
+
+ switch tok.typ {
+ case tokenDoubleLeftBracket:
+ return p.parseGroupArray
+ case tokenLeftBracket:
+ return p.parseGroup
+ case tokenKey:
+ return p.parseAssign
+ case tokenEOF:
+ return nil
+ case tokenError:
+ p.raiseError(tok, "parsing error: %s", tok.String())
+ default:
+ p.raiseError(tok, "unexpected token %s", tok.typ)
+ }
+ return nil
+}
+
+func (p *tomlParser) parseGroupArray() tomlParserStateFn {
+ startToken := p.getToken() // discard the [[
+ key := p.getToken()
+ if key.typ != tokenKeyGroupArray {
+ p.raiseError(key, "unexpected token %s, was expecting a table array key", key)
+ }
+
+ // get or create table array element at the indicated part in the path
+ keys, err := parseKey(key.val)
+ if err != nil {
+ p.raiseError(key, "invalid table array key: %s", err)
+ }
+ p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries
+ destTree := p.tree.GetPath(keys)
+ var array []*Tree
+ if destTree == nil {
+ array = make([]*Tree, 0)
+ } else if target, ok := destTree.([]*Tree); ok && target != nil {
+ array = destTree.([]*Tree)
+ } else {
+ p.raiseError(key, "key %s is already assigned and not of type table array", key)
+ }
+ p.currentTable = keys
+
+ // add a new tree to the end of the table array
+ newTree := newTree()
+ newTree.position = startToken.Position
+ array = append(array, newTree)
+ p.tree.SetPath(p.currentTable, array)
+
+ // remove all keys that were children of this table array
+ prefix := key.val + "."
+ found := false
+ for ii := 0; ii < len(p.seenTableKeys); {
+ tableKey := p.seenTableKeys[ii]
+ if strings.HasPrefix(tableKey, prefix) {
+ p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...)
+ } else {
+ found = (tableKey == key.val)
+ ii++
+ }
+ }
+
+ // keep this key name from use by other kinds of assignments
+ if !found {
+ p.seenTableKeys = append(p.seenTableKeys, key.val)
+ }
+
+ // move to next parser state
+ p.assume(tokenDoubleRightBracket)
+ return p.parseStart
+}
+
+func (p *tomlParser) parseGroup() tomlParserStateFn {
+ startToken := p.getToken() // discard the [
+ key := p.getToken()
+ if key.typ != tokenKeyGroup {
+ p.raiseError(key, "unexpected token %s, was expecting a table key", key)
+ }
+ for _, item := range p.seenTableKeys {
+ if item == key.val {
+ p.raiseError(key, "duplicated tables")
+ }
+ }
+
+ p.seenTableKeys = append(p.seenTableKeys, key.val)
+ keys, err := parseKey(key.val)
+ if err != nil {
+ p.raiseError(key, "invalid table array key: %s", err)
+ }
+ if err := p.tree.createSubTree(keys, startToken.Position); err != nil {
+ p.raiseError(key, "%s", err)
+ }
+ destTree := p.tree.GetPath(keys)
+ if target, ok := destTree.(*Tree); ok && target != nil && target.inline {
+		p.raiseError(key, "could not re-define existing inline table or its sub-table: %s",
+ strings.Join(keys, "."))
+ }
+ p.assume(tokenRightBracket)
+ p.currentTable = keys
+ return p.parseStart
+}
+
+func (p *tomlParser) parseAssign() tomlParserStateFn {
+ key := p.getToken()
+ p.assume(tokenEqual)
+
+ parsedKey, err := parseKey(key.val)
+ if err != nil {
+ p.raiseError(key, "invalid key: %s", err.Error())
+ }
+
+ value := p.parseRvalue()
+ var tableKey []string
+ if len(p.currentTable) > 0 {
+ tableKey = p.currentTable
+ } else {
+ tableKey = []string{}
+ }
+
+ prefixKey := parsedKey[0 : len(parsedKey)-1]
+ tableKey = append(tableKey, prefixKey...)
+
+ // find the table to assign, looking out for arrays of tables
+ var targetNode *Tree
+ switch node := p.tree.GetPath(tableKey).(type) {
+ case []*Tree:
+ targetNode = node[len(node)-1]
+ case *Tree:
+ targetNode = node
+ case nil:
+ // create intermediate
+ if err := p.tree.createSubTree(tableKey, key.Position); err != nil {
+ p.raiseError(key, "could not create intermediate group: %s", err)
+ }
+ targetNode = p.tree.GetPath(tableKey).(*Tree)
+ default:
+ p.raiseError(key, "Unknown table type for path: %s",
+ strings.Join(tableKey, "."))
+ }
+
+ if targetNode.inline {
+		p.raiseError(key, "could not add key or sub-table to existing inline table or its sub-table: %s",
+ strings.Join(tableKey, "."))
+ }
+
+ // assign value to the found table
+ keyVal := parsedKey[len(parsedKey)-1]
+ localKey := []string{keyVal}
+ finalKey := append(tableKey, keyVal)
+ if targetNode.GetPath(localKey) != nil {
+ p.raiseError(key, "The following key was defined twice: %s",
+ strings.Join(finalKey, "."))
+ }
+ var toInsert interface{}
+
+ switch value.(type) {
+ case *Tree, []*Tree:
+ toInsert = value
+ default:
+ toInsert = &tomlValue{value: value, position: key.Position}
+ }
+ targetNode.values[keyVal] = toInsert
+ return p.parseStart
+}
+
+var errInvalidUnderscore = errors.New("invalid use of _ in number")
+
+func numberContainsInvalidUnderscore(value string) error {
+ // For large numbers, you may use underscores between digits to enhance
+ // readability. Each underscore must be surrounded by at least one digit on
+ // each side.
+
+ hasBefore := false
+ for idx, r := range value {
+ if r == '_' {
+ if !hasBefore || idx+1 >= len(value) {
+				// each underscore must be preceded by a digit and cannot end the number
+ return errInvalidUnderscore
+ }
+ }
+ hasBefore = isDigit(r)
+ }
+ return nil
+}
+
+var errInvalidUnderscoreHex = errors.New("invalid use of _ in hex number")
+
+func hexNumberContainsInvalidUnderscore(value string) error {
+ hasBefore := false
+ for idx, r := range value {
+ if r == '_' {
+ if !hasBefore || idx+1 >= len(value) {
+				// each underscore must be preceded by a digit and cannot end the number
+ return errInvalidUnderscoreHex
+ }
+ }
+ hasBefore = isHexDigit(r)
+ }
+ return nil
+}
+
+func cleanupNumberToken(value string) string {
+ cleanedVal := strings.Replace(value, "_", "", -1)
+ return cleanedVal
+}
+
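+// parseRvalue consumes the tokens of a single value (string, boolean, number,
+// date/time, array or inline table) and returns the corresponding Go value.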
+func (p *tomlParser) parseRvalue() interface{} {
+ tok := p.getToken()
+ if tok == nil || tok.typ == tokenEOF {
+ p.raiseError(tok, "expecting a value")
+ }
+
+ switch tok.typ {
+ case tokenString:
+ return tok.val
+ case tokenTrue:
+ return true
+ case tokenFalse:
+ return false
+ case tokenInf:
+ if tok.val[0] == '-' {
+ return math.Inf(-1)
+ }
+ return math.Inf(1)
+ case tokenNan:
+ return math.NaN()
+ case tokenInteger:
+ cleanedVal := cleanupNumberToken(tok.val)
+ var err error
+ var val int64
+ if len(cleanedVal) >= 3 && cleanedVal[0] == '0' {
+ switch cleanedVal[1] {
+ case 'x':
+ err = hexNumberContainsInvalidUnderscore(tok.val)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ val, err = strconv.ParseInt(cleanedVal[2:], 16, 64)
+ case 'o':
+ err = numberContainsInvalidUnderscore(tok.val)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ val, err = strconv.ParseInt(cleanedVal[2:], 8, 64)
+ case 'b':
+ err = numberContainsInvalidUnderscore(tok.val)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ val, err = strconv.ParseInt(cleanedVal[2:], 2, 64)
+ default:
+ panic("invalid base") // the lexer should catch this first
+ }
+ } else {
+ err = numberContainsInvalidUnderscore(tok.val)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ val, err = strconv.ParseInt(cleanedVal, 10, 64)
+ }
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ return val
+ case tokenFloat:
+ err := numberContainsInvalidUnderscore(tok.val)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ cleanedVal := cleanupNumberToken(tok.val)
+ val, err := strconv.ParseFloat(cleanedVal, 64)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ return val
+ case tokenLocalTime:
+ val, err := ParseLocalTime(tok.val)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ return val
+ case tokenLocalDate:
+ // a local date may be followed by:
+ // * nothing: this is a local date
+ // * a local time: this is a local date-time
+
+ next := p.peek()
+ if next == nil || next.typ != tokenLocalTime {
+ val, err := ParseLocalDate(tok.val)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ return val
+ }
+
+ localDate := tok
+ localTime := p.getToken()
+
+ next = p.peek()
+ if next == nil || next.typ != tokenTimeOffset {
+ v := localDate.val + "T" + localTime.val
+ val, err := ParseLocalDateTime(v)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ return val
+ }
+
+ offset := p.getToken()
+
+ layout := time.RFC3339Nano
+ v := localDate.val + "T" + localTime.val + offset.val
+ val, err := time.ParseInLocation(layout, v, time.UTC)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ return val
+ case tokenLeftBracket:
+ return p.parseArray()
+ case tokenLeftCurlyBrace:
+ return p.parseInlineTable()
+ case tokenEqual:
+ p.raiseError(tok, "cannot have multiple equals for the same key")
+ case tokenError:
+ p.raiseError(tok, "%s", tok)
+ default:
+ panic(fmt.Errorf("unhandled token: %v", tok))
+ }
+
+ return nil
+}
+
+func tokenIsComma(t *token) bool {
+ return t != nil && t.typ == tokenComma
+}
+
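+// parseInlineTable parses an inline table of the form { key = value, ... } into a Tree marked as inline.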
+func (p *tomlParser) parseInlineTable() *Tree {
+ tree := newTree()
+ var previous *token
+Loop:
+ for {
+ follow := p.peek()
+ if follow == nil || follow.typ == tokenEOF {
+ p.raiseError(follow, "unterminated inline table")
+ }
+ switch follow.typ {
+ case tokenRightCurlyBrace:
+ p.getToken()
+ break Loop
+ case tokenKey, tokenInteger, tokenString:
+ if !tokenIsComma(previous) && previous != nil {
+ p.raiseError(follow, "comma expected between fields in inline table")
+ }
+ key := p.getToken()
+ p.assume(tokenEqual)
+
+ parsedKey, err := parseKey(key.val)
+ if err != nil {
+ p.raiseError(key, "invalid key: %s", err)
+ }
+
+ value := p.parseRvalue()
+ tree.SetPath(parsedKey, value)
+ case tokenComma:
+ if tokenIsComma(previous) {
+ p.raiseError(follow, "need field between two commas in inline table")
+ }
+ p.getToken()
+ default:
+ p.raiseError(follow, "unexpected token type in inline table: %s", follow.String())
+ }
+ previous = follow
+ }
+ if tokenIsComma(previous) {
+ p.raiseError(previous, "trailing comma at the end of inline table")
+ }
+ tree.inline = true
+ return tree
+}
+
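+// parseArray parses a bracketed array. A homogeneous array of inline tables is
+// returned as []*Tree so that it behaves like a table array; anything else is
+// returned as []interface{}.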
+func (p *tomlParser) parseArray() interface{} {
+ var array []interface{}
+ arrayType := reflect.TypeOf(newTree())
+ for {
+ follow := p.peek()
+ if follow == nil || follow.typ == tokenEOF {
+ p.raiseError(follow, "unterminated array")
+ }
+ if follow.typ == tokenRightBracket {
+ p.getToken()
+ break
+ }
+ val := p.parseRvalue()
+ if reflect.TypeOf(val) != arrayType {
+ arrayType = nil
+ }
+ array = append(array, val)
+ follow = p.peek()
+ if follow == nil || follow.typ == tokenEOF {
+ p.raiseError(follow, "unterminated array")
+ }
+ if follow.typ != tokenRightBracket && follow.typ != tokenComma {
+ p.raiseError(follow, "missing comma")
+ }
+ if follow.typ == tokenComma {
+ p.getToken()
+ }
+ }
+
+ // if the array is a mixed-type array or its length is 0,
+ // don't convert it to a table array
+ if len(array) <= 0 {
+ arrayType = nil
+ }
+ // An array of Trees is actually an array of inline
+ // tables, which is a shorthand for a table array. If the
+ // array was not converted from []interface{} to []*Tree,
+ // the two notations would not be equivalent.
+ if arrayType == reflect.TypeOf(newTree()) {
+ tomlArray := make([]*Tree, len(array))
+ for i, v := range array {
+ tomlArray[i] = v.(*Tree)
+ }
+ return tomlArray
+ }
+ return array
+}
+
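+// parseToml runs the parser state machine over the token stream and returns the resulting document Tree.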
+func parseToml(flow []token) *Tree {
+ result := newTree()
+ result.position = Position{1, 1}
+ parser := &tomlParser{
+ flowIdx: 0,
+ flow: flow,
+ tree: result,
+ currentTable: make([]string, 0),
+ seenTableKeys: make([]string, 0),
+ }
+ parser.run()
+ return result
+}
diff --git a/vendor/github.com/pelletier/go-toml/position.go b/vendor/github.com/pelletier/go-toml/position.go
new file mode 100644
index 000000000..c17bff87b
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/position.go
@@ -0,0 +1,29 @@
+// Position support for go-toml
+
+package toml
+
+import (
+ "fmt"
+)
+
+// Position of a document element within a TOML document.
+//
+// Line and Col are both 1-indexed positions for the element's line number and
+// column number, respectively. Values of zero or less will cause Invalid()
+// to return true.
+type Position struct {
+ Line int // line within the document
+ Col int // column within the line
+}
+
+// String representation of the position.
+// Displays 1-indexed line and column numbers.
+func (p Position) String() string {
+ return fmt.Sprintf("(%d, %d)", p.Line, p.Col)
+}
+
+// Invalid reports whether the position is invalid, i.e. whether Line or Col is
+// zero or negative.
+func (p Position) Invalid() bool {
+ return p.Line <= 0 || p.Col <= 0
+}
diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go
new file mode 100644
index 000000000..b437fdd3b
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/token.go
@@ -0,0 +1,136 @@
+package toml
+
+import "fmt"
+
+// Define tokens
+type tokenType int
+
+const (
+ eof = -(iota + 1)
+)
+
+const (
+ tokenError tokenType = iota
+ tokenEOF
+ tokenComment
+ tokenKey
+ tokenString
+ tokenInteger
+ tokenTrue
+ tokenFalse
+ tokenFloat
+ tokenInf
+ tokenNan
+ tokenEqual
+ tokenLeftBracket
+ tokenRightBracket
+ tokenLeftCurlyBrace
+ tokenRightCurlyBrace
+ tokenLeftParen
+ tokenRightParen
+ tokenDoubleLeftBracket
+ tokenDoubleRightBracket
+ tokenLocalDate
+ tokenLocalTime
+ tokenTimeOffset
+ tokenKeyGroup
+ tokenKeyGroupArray
+ tokenComma
+ tokenColon
+ tokenDollar
+ tokenStar
+ tokenQuestion
+ tokenDot
+ tokenDotDot
+ tokenEOL
+)
+
+var tokenTypeNames = []string{
+ "Error",
+ "EOF",
+ "Comment",
+ "Key",
+ "String",
+ "Integer",
+ "True",
+ "False",
+ "Float",
+ "Inf",
+ "NaN",
+ "=",
+ "[",
+ "]",
+ "{",
+ "}",
+ "(",
+ ")",
+	"[[",
+	"]]",
+ "LocalDate",
+ "LocalTime",
+ "TimeOffset",
+ "KeyGroup",
+ "KeyGroupArray",
+ ",",
+ ":",
+ "$",
+ "*",
+ "?",
+ ".",
+ "..",
+ "EOL",
+}
+
+type token struct {
+ Position
+ typ tokenType
+ val string
+}
+
+func (tt tokenType) String() string {
+ idx := int(tt)
+ if idx < len(tokenTypeNames) {
+ return tokenTypeNames[idx]
+ }
+ return "Unknown"
+}
+
+func (t token) String() string {
+ switch t.typ {
+ case tokenEOF:
+ return "EOF"
+ case tokenError:
+ return t.val
+ }
+
+ return fmt.Sprintf("%q", t.val)
+}
+
+func isSpace(r rune) bool {
+ return r == ' ' || r == '\t'
+}
+
+func isAlphanumeric(r rune) bool {
+ return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || r == '_'
+}
+
+func isKeyChar(r rune) bool {
+ // Keys start with the first character that isn't whitespace or [ and end
+ // with the last non-whitespace character before the equals sign. Keys
+	// cannot contain a # character.
+ return !(r == '\r' || r == '\n' || r == eof || r == '=')
+}
+
+func isKeyStartChar(r rune) bool {
+ return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[')
+}
+
+func isDigit(r rune) bool {
+ return '0' <= r && r <= '9'
+}
+
+func isHexDigit(r rune) bool {
+ return isDigit(r) ||
+ (r >= 'a' && r <= 'f') ||
+ (r >= 'A' && r <= 'F')
+}
diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go
new file mode 100644
index 000000000..6d82587c4
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/toml.go
@@ -0,0 +1,533 @@
+package toml
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "runtime"
+ "strings"
+)
+
+type tomlValue struct {
+ value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list
+ comment string
+ commented bool
+ multiline bool
+ literal bool
+ position Position
+}
+
+// Tree is the result of the parsing of a TOML file.
+type Tree struct {
+ values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree
+ comment string
+ commented bool
+ inline bool
+ position Position
+}
+
+func newTree() *Tree {
+ return newTreeWithPosition(Position{})
+}
+
+func newTreeWithPosition(pos Position) *Tree {
+ return &Tree{
+ values: make(map[string]interface{}),
+ position: pos,
+ }
+}
+
+// TreeFromMap initializes a new Tree object using the given map.
+func TreeFromMap(m map[string]interface{}) (*Tree, error) {
+ result, err := toTree(m)
+ if err != nil {
+ return nil, err
+ }
+ return result.(*Tree), nil
+}
+
+// Position returns the position of the tree.
+func (t *Tree) Position() Position {
+ return t.position
+}
+
+// Has returns a boolean indicating if the given key exists.
+func (t *Tree) Has(key string) bool {
+ if key == "" {
+ return false
+ }
+ return t.HasPath(strings.Split(key, "."))
+}
+
+// HasPath returns true if the given path of keys exists, false otherwise.
+func (t *Tree) HasPath(keys []string) bool {
+ return t.GetPath(keys) != nil
+}
+
+// Keys returns the keys of the toplevel tree (does not recurse).
+func (t *Tree) Keys() []string {
+ keys := make([]string, len(t.values))
+ i := 0
+ for k := range t.values {
+ keys[i] = k
+ i++
+ }
+ return keys
+}
+
+// Get the value at key in the Tree.
+// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings.
+// If you need to retrieve non-bare keys, use GetPath.
+// Returns nil if the path does not exist in the tree.
+// If keys is of length zero, the current tree is returned.
+func (t *Tree) Get(key string) interface{} {
+ if key == "" {
+ return t
+ }
+ return t.GetPath(strings.Split(key, "."))
+}
+
+// GetPath returns the element in the tree indicated by 'keys'.
+// If keys is of length zero, the current tree is returned.
+func (t *Tree) GetPath(keys []string) interface{} {
+ if len(keys) == 0 {
+ return t
+ }
+ subtree := t
+ for _, intermediateKey := range keys[:len(keys)-1] {
+ value, exists := subtree.values[intermediateKey]
+ if !exists {
+ return nil
+ }
+ switch node := value.(type) {
+ case *Tree:
+ subtree = node
+ case []*Tree:
+ // go to most recent element
+ if len(node) == 0 {
+ return nil
+ }
+ subtree = node[len(node)-1]
+ default:
+ return nil // cannot navigate through other node types
+ }
+ }
+ // branch based on final node type
+ switch node := subtree.values[keys[len(keys)-1]].(type) {
+ case *tomlValue:
+ return node.value
+ default:
+ return node
+ }
+}
+
+// GetArray returns the value at key in the Tree.
+// It returns []string, []int64, etc type if key has homogeneous lists
+// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings.
+// Returns nil if the path does not exist in the tree.
+// If keys is of length zero, the current tree is returned.
+func (t *Tree) GetArray(key string) interface{} {
+ if key == "" {
+ return t
+ }
+ return t.GetArrayPath(strings.Split(key, "."))
+}
+
+// GetArrayPath returns the element in the tree indicated by 'keys'.
+// If keys is of length zero, the current tree is returned.
+func (t *Tree) GetArrayPath(keys []string) interface{} {
+ if len(keys) == 0 {
+ return t
+ }
+ subtree := t
+ for _, intermediateKey := range keys[:len(keys)-1] {
+ value, exists := subtree.values[intermediateKey]
+ if !exists {
+ return nil
+ }
+ switch node := value.(type) {
+ case *Tree:
+ subtree = node
+ case []*Tree:
+ // go to most recent element
+ if len(node) == 0 {
+ return nil
+ }
+ subtree = node[len(node)-1]
+ default:
+ return nil // cannot navigate through other node types
+ }
+ }
+ // branch based on final node type
+ switch node := subtree.values[keys[len(keys)-1]].(type) {
+ case *tomlValue:
+ switch n := node.value.(type) {
+ case []interface{}:
+ return getArray(n)
+ default:
+ return node.value
+ }
+ default:
+ return node
+ }
+}
+
+// getArray returns a typed slice ([]string, []int64, ...) when the array is homogeneous, otherwise the original []interface{}.
+func getArray(n []interface{}) interface{} {
+ var s []string
+ var i64 []int64
+ var f64 []float64
+ var bl []bool
+ for _, value := range n {
+ switch v := value.(type) {
+ case string:
+ s = append(s, v)
+ case int64:
+ i64 = append(i64, v)
+ case float64:
+ f64 = append(f64, v)
+ case bool:
+ bl = append(bl, v)
+ default:
+ return n
+ }
+ }
+ if len(s) == len(n) {
+ return s
+ } else if len(i64) == len(n) {
+ return i64
+ } else if len(f64) == len(n) {
+ return f64
+ } else if len(bl) == len(n) {
+ return bl
+ }
+ return n
+}
+
+// GetPosition returns the position of the given key.
+func (t *Tree) GetPosition(key string) Position {
+ if key == "" {
+ return t.position
+ }
+ return t.GetPositionPath(strings.Split(key, "."))
+}
+
+// SetPositionPath sets the position of element in the tree indicated by 'keys'.
+// If keys is of length zero, the current tree position is set.
+func (t *Tree) SetPositionPath(keys []string, pos Position) {
+ if len(keys) == 0 {
+ t.position = pos
+ return
+ }
+ subtree := t
+ for _, intermediateKey := range keys[:len(keys)-1] {
+ value, exists := subtree.values[intermediateKey]
+ if !exists {
+ return
+ }
+ switch node := value.(type) {
+ case *Tree:
+ subtree = node
+ case []*Tree:
+ // go to most recent element
+ if len(node) == 0 {
+ return
+ }
+ subtree = node[len(node)-1]
+ default:
+ return
+ }
+ }
+ // branch based on final node type
+ switch node := subtree.values[keys[len(keys)-1]].(type) {
+ case *tomlValue:
+ node.position = pos
+ return
+ case *Tree:
+ node.position = pos
+ return
+ case []*Tree:
+ // go to most recent element
+ if len(node) == 0 {
+ return
+ }
+ node[len(node)-1].position = pos
+ return
+ }
+}
+
+// GetPositionPath returns the element in the tree indicated by 'keys'.
+// If keys is of length zero, the current tree is returned.
+func (t *Tree) GetPositionPath(keys []string) Position {
+ if len(keys) == 0 {
+ return t.position
+ }
+ subtree := t
+ for _, intermediateKey := range keys[:len(keys)-1] {
+ value, exists := subtree.values[intermediateKey]
+ if !exists {
+ return Position{0, 0}
+ }
+ switch node := value.(type) {
+ case *Tree:
+ subtree = node
+ case []*Tree:
+ // go to most recent element
+ if len(node) == 0 {
+ return Position{0, 0}
+ }
+ subtree = node[len(node)-1]
+ default:
+ return Position{0, 0}
+ }
+ }
+ // branch based on final node type
+ switch node := subtree.values[keys[len(keys)-1]].(type) {
+ case *tomlValue:
+ return node.position
+ case *Tree:
+ return node.position
+ case []*Tree:
+ // go to most recent element
+ if len(node) == 0 {
+ return Position{0, 0}
+ }
+ return node[len(node)-1].position
+ default:
+ return Position{0, 0}
+ }
+}
+
+// GetDefault works like Get but with a default value
+func (t *Tree) GetDefault(key string, def interface{}) interface{} {
+ val := t.Get(key)
+ if val == nil {
+ return def
+ }
+ return val
+}
+
+// SetOptions arguments are supplied to the SetWithOptions and SetPathWithOptions functions to modify marshalling behaviour.
+// The default values within the struct are valid default options.
+type SetOptions struct {
+ Comment string
+ Commented bool
+ Multiline bool
+ Literal bool
+}
+
+// SetWithOptions is the same as Set, but allows you to provide formatting
+// instructions for the key, which will be used by Marshal().
+func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) {
+ t.SetPathWithOptions(strings.Split(key, "."), opts, value)
+}
+
+// SetPathWithOptions is the same as SetPath, but allows you to provide
+// formatting instructions for the key, which will be reused by Marshal().
+func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) {
+ subtree := t
+ for i, intermediateKey := range keys[:len(keys)-1] {
+ nextTree, exists := subtree.values[intermediateKey]
+ if !exists {
+ nextTree = newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})
+ subtree.values[intermediateKey] = nextTree // add new element here
+ }
+ switch node := nextTree.(type) {
+ case *Tree:
+ subtree = node
+ case []*Tree:
+ // go to most recent element
+ if len(node) == 0 {
+ // create element if it does not exist
+ node = append(node, newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}))
+ subtree.values[intermediateKey] = node
+ }
+ subtree = node[len(node)-1]
+ }
+ }
+
+ var toInsert interface{}
+
+ switch v := value.(type) {
+ case *Tree:
+ v.comment = opts.Comment
+ v.commented = opts.Commented
+ toInsert = value
+ case []*Tree:
+ for i := range v {
+ v[i].commented = opts.Commented
+ }
+ toInsert = value
+ case *tomlValue:
+ v.comment = opts.Comment
+ v.commented = opts.Commented
+ v.multiline = opts.Multiline
+ v.literal = opts.Literal
+ toInsert = v
+ default:
+ toInsert = &tomlValue{value: value,
+ comment: opts.Comment,
+ commented: opts.Commented,
+ multiline: opts.Multiline,
+ literal: opts.Literal,
+ position: Position{Line: subtree.position.Line + len(subtree.values) + 1, Col: subtree.position.Col}}
+ }
+
+ subtree.values[keys[len(keys)-1]] = toInsert
+}
+
+// Set an element in the tree.
+// Key is a dot-separated path (e.g. a.b.c).
+// Creates all necessary intermediate trees, if needed.
+func (t *Tree) Set(key string, value interface{}) {
+ t.SetWithComment(key, "", false, value)
+}
+
+// SetWithComment is the same as Set, but allows you to provide comment
+// information for the key, which will be reused by Marshal().
+func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) {
+ t.SetPathWithComment(strings.Split(key, "."), comment, commented, value)
+}
+
+// SetPath sets an element in the tree.
+// Keys is an array of path elements (e.g. {"a","b","c"}).
+// Creates all necessary intermediate trees, if needed.
+func (t *Tree) SetPath(keys []string, value interface{}) {
+ t.SetPathWithComment(keys, "", false, value)
+}
+
+// SetPathWithComment is the same as SetPath, but allows you to provide comment
+// information for the key, which will be reused by Marshal().
+func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) {
+ t.SetPathWithOptions(keys, SetOptions{Comment: comment, Commented: commented}, value)
+}
+
+// Delete removes a key from the tree.
+// Key is a dot-separated path (e.g. a.b.c).
+func (t *Tree) Delete(key string) error {
+ keys, err := parseKey(key)
+ if err != nil {
+ return err
+ }
+ return t.DeletePath(keys)
+}
+
+// DeletePath removes a key from the tree.
+// Keys is an array of path elements (e.g. {"a","b","c"}).
+func (t *Tree) DeletePath(keys []string) error {
+ keyLen := len(keys)
+ if keyLen == 1 {
+ delete(t.values, keys[0])
+ return nil
+ }
+ tree := t.GetPath(keys[:keyLen-1])
+ item := keys[keyLen-1]
+ switch node := tree.(type) {
+ case *Tree:
+ delete(node.values, item)
+ return nil
+ }
+ return errors.New("no such key to delete")
+}
+
+// createSubTree takes a tree and a key and creates, in place, the necessary
+// intermediate subtrees so that a subtree exists at that point.
+//
+// e.g. passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b]
+// and tree[a][b][c]
+//
+// Returns nil on success, error object on failure
+func (t *Tree) createSubTree(keys []string, pos Position) error {
+ subtree := t
+ for i, intermediateKey := range keys {
+ nextTree, exists := subtree.values[intermediateKey]
+ if !exists {
+ tree := newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})
+ tree.position = pos
+ tree.inline = subtree.inline
+ subtree.values[intermediateKey] = tree
+ nextTree = tree
+ }
+
+ switch node := nextTree.(type) {
+ case []*Tree:
+ subtree = node[len(node)-1]
+ case *Tree:
+ subtree = node
+ default:
+ return fmt.Errorf("unknown type for path %s (%s): %T (%#v)",
+ strings.Join(keys, "."), intermediateKey, nextTree, nextTree)
+ }
+ }
+ return nil
+}
+
+// LoadBytes creates a Tree from a []byte.
+func LoadBytes(b []byte) (tree *Tree, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = errors.New(r.(string))
+ }
+ }()
+
+ if len(b) >= 4 && (hasUTF32BigEndianBOM4(b) || hasUTF32LittleEndianBOM4(b)) {
+ b = b[4:]
+ } else if len(b) >= 3 && hasUTF8BOM3(b) {
+ b = b[3:]
+ } else if len(b) >= 2 && (hasUTF16BigEndianBOM2(b) || hasUTF16LittleEndianBOM2(b)) {
+ b = b[2:]
+ }
+
+ tree = parseToml(lexToml(b))
+ return
+}
+
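+// The hasUTF*BOM* helpers detect byte order marks so that LoadBytes can strip them before lexing.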
+func hasUTF16BigEndianBOM2(b []byte) bool {
+ return b[0] == 0xFE && b[1] == 0xFF
+}
+
+func hasUTF16LittleEndianBOM2(b []byte) bool {
+ return b[0] == 0xFF && b[1] == 0xFE
+}
+
+func hasUTF8BOM3(b []byte) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+func hasUTF32BigEndianBOM4(b []byte) bool {
+ return b[0] == 0x00 && b[1] == 0x00 && b[2] == 0xFE && b[3] == 0xFF
+}
+
+func hasUTF32LittleEndianBOM4(b []byte) bool {
+ return b[0] == 0xFF && b[1] == 0xFE && b[2] == 0x00 && b[3] == 0x00
+}
+
+// LoadReader creates a Tree from any io.Reader.
+func LoadReader(reader io.Reader) (tree *Tree, err error) {
+ inputBytes, err := ioutil.ReadAll(reader)
+ if err != nil {
+ return
+ }
+ tree, err = LoadBytes(inputBytes)
+ return
+}
+
+// Load creates a Tree from a string.
+func Load(content string) (tree *Tree, err error) {
+ return LoadBytes([]byte(content))
+}
+
+// LoadFile creates a Tree from a file.
+func LoadFile(path string) (tree *Tree, err error) {
+ file, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ return LoadReader(file)
+}
diff --git a/vendor/github.com/pelletier/go-toml/tomlpub.go b/vendor/github.com/pelletier/go-toml/tomlpub.go
new file mode 100644
index 000000000..4136b4625
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/tomlpub.go
@@ -0,0 +1,71 @@
+package toml
+
+// PubTOMLValue wraps tomlValue so that all of its properties can be accessed from outside the package.
+type PubTOMLValue = tomlValue
+
+func (ptv *PubTOMLValue) Value() interface{} {
+ return ptv.value
+}
+func (ptv *PubTOMLValue) Comment() string {
+ return ptv.comment
+}
+func (ptv *PubTOMLValue) Commented() bool {
+ return ptv.commented
+}
+func (ptv *PubTOMLValue) Multiline() bool {
+ return ptv.multiline
+}
+func (ptv *PubTOMLValue) Position() Position {
+ return ptv.position
+}
+
+func (ptv *PubTOMLValue) SetValue(v interface{}) {
+ ptv.value = v
+}
+func (ptv *PubTOMLValue) SetComment(s string) {
+ ptv.comment = s
+}
+func (ptv *PubTOMLValue) SetCommented(c bool) {
+ ptv.commented = c
+}
+func (ptv *PubTOMLValue) SetMultiline(m bool) {
+ ptv.multiline = m
+}
+func (ptv *PubTOMLValue) SetPosition(p Position) {
+ ptv.position = p
+}
+
+// PubTree wraps Tree so that all of its properties can be accessed from outside the package.
+type PubTree = Tree
+
+func (pt *PubTree) Values() map[string]interface{} {
+ return pt.values
+}
+
+func (pt *PubTree) Comment() string {
+ return pt.comment
+}
+
+func (pt *PubTree) Commented() bool {
+ return pt.commented
+}
+
+func (pt *PubTree) Inline() bool {
+ return pt.inline
+}
+
+func (pt *PubTree) SetValues(v map[string]interface{}) {
+ pt.values = v
+}
+
+func (pt *PubTree) SetComment(c string) {
+ pt.comment = c
+}
+
+func (pt *PubTree) SetCommented(c bool) {
+ pt.commented = c
+}
+
+func (pt *PubTree) SetInline(i bool) {
+ pt.inline = i
+}
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go
new file mode 100644
index 000000000..80353500a
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/tomltree_create.go
@@ -0,0 +1,155 @@
+package toml
+
+import (
+ "fmt"
+ "reflect"
+ "time"
+)
+
+var kindToType = [reflect.String + 1]reflect.Type{
+ reflect.Bool: reflect.TypeOf(true),
+ reflect.String: reflect.TypeOf(""),
+ reflect.Float32: reflect.TypeOf(float64(1)),
+ reflect.Float64: reflect.TypeOf(float64(1)),
+ reflect.Int: reflect.TypeOf(int64(1)),
+ reflect.Int8: reflect.TypeOf(int64(1)),
+ reflect.Int16: reflect.TypeOf(int64(1)),
+ reflect.Int32: reflect.TypeOf(int64(1)),
+ reflect.Int64: reflect.TypeOf(int64(1)),
+ reflect.Uint: reflect.TypeOf(uint64(1)),
+ reflect.Uint8: reflect.TypeOf(uint64(1)),
+ reflect.Uint16: reflect.TypeOf(uint64(1)),
+ reflect.Uint32: reflect.TypeOf(uint64(1)),
+ reflect.Uint64: reflect.TypeOf(uint64(1)),
+}
+
+// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found.
+// supported values:
+// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32
+func typeFor(k reflect.Kind) reflect.Type {
+ if k > 0 && int(k) < len(kindToType) {
+ return kindToType[k]
+ }
+ return nil
+}
+
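+// simpleValueCoercion normalizes Go scalars to the types stored in a Tree
+// (int64, uint64, float64, string for fmt.Stringer), recursing into []interface{}.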
+func simpleValueCoercion(object interface{}) (interface{}, error) {
+ switch original := object.(type) {
+ case string, bool, int64, uint64, float64, time.Time:
+ return original, nil
+ case int:
+ return int64(original), nil
+ case int8:
+ return int64(original), nil
+ case int16:
+ return int64(original), nil
+ case int32:
+ return int64(original), nil
+ case uint:
+ return uint64(original), nil
+ case uint8:
+ return uint64(original), nil
+ case uint16:
+ return uint64(original), nil
+ case uint32:
+ return uint64(original), nil
+ case float32:
+ return float64(original), nil
+ case fmt.Stringer:
+ return original.String(), nil
+ case []interface{}:
+ value := reflect.ValueOf(original)
+ length := value.Len()
+ arrayValue := reflect.MakeSlice(value.Type(), 0, length)
+ for i := 0; i < length; i++ {
+ val := value.Index(i).Interface()
+ simpleValue, err := simpleValueCoercion(val)
+ if err != nil {
+ return nil, err
+ }
+ arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue))
+ }
+ return arrayValue.Interface(), nil
+ default:
+ return nil, fmt.Errorf("cannot convert type %T to Tree", object)
+ }
+}
+
+func sliceToTree(object interface{}) (interface{}, error) {
+ // arrays are a bit tricky, since they can represent either a
+ // collection of simple values, which is represented by one
+ // *tomlValue, or an array of tables, which is represented by an
+ // array of *Tree.
+
+ // holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice
+ value := reflect.ValueOf(object)
+ insideType := value.Type().Elem()
+ length := value.Len()
+ if length > 0 {
+ insideType = reflect.ValueOf(value.Index(0).Interface()).Type()
+ }
+ if insideType.Kind() == reflect.Map {
+ // this is considered as an array of tables
+ tablesArray := make([]*Tree, 0, length)
+ for i := 0; i < length; i++ {
+ table := value.Index(i)
+ tree, err := toTree(table.Interface())
+ if err != nil {
+ return nil, err
+ }
+ tablesArray = append(tablesArray, tree.(*Tree))
+ }
+ return tablesArray, nil
+ }
+
+ sliceType := typeFor(insideType.Kind())
+ if sliceType == nil {
+ sliceType = insideType
+ }
+
+ arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length)
+
+ for i := 0; i < length; i++ {
+ val := value.Index(i).Interface()
+ simpleValue, err := simpleValueCoercion(val)
+ if err != nil {
+ return nil, err
+ }
+ arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue))
+ }
+ return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil
+}
+
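+// toTree converts a Go map, slice, array or scalar into the corresponding tree node (*Tree, []*Tree or *tomlValue).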
+func toTree(object interface{}) (interface{}, error) {
+ value := reflect.ValueOf(object)
+
+ if value.Kind() == reflect.Map {
+ values := map[string]interface{}{}
+ keys := value.MapKeys()
+ for _, key := range keys {
+ if key.Kind() != reflect.String {
+ if _, ok := key.Interface().(string); !ok {
+ return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind())
+ }
+ }
+
+ v := value.MapIndex(key)
+ newValue, err := toTree(v.Interface())
+ if err != nil {
+ return nil, err
+ }
+ values[key.String()] = newValue
+ }
+ return &Tree{values: values, position: Position{}}, nil
+ }
+
+ if value.Kind() == reflect.Array || value.Kind() == reflect.Slice {
+ return sliceToTree(object)
+ }
+
+ simpleValue, err := simpleValueCoercion(object)
+ if err != nil {
+ return nil, err
+ }
+ return &tomlValue{value: simpleValue, position: Position{}}, nil
+}
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go
new file mode 100644
index 000000000..c9afbdab7
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go
@@ -0,0 +1,552 @@
+package toml
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "math/big"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type valueComplexity int
+
+const (
+ valueSimple valueComplexity = iota + 1
+ valueComplex
+)
+
+type sortNode struct {
+ key string
+ complexity valueComplexity
+}
+
+// Encodes a string to a TOML-compliant multi-line string value
+// This function is a clone of the existing encodeTomlString function, except that whitespace characters
+// are preserved. Quotation marks and backslashes are also not escaped.
+func encodeMultilineTomlString(value string, commented string) string {
+ var b bytes.Buffer
+ adjacentQuoteCount := 0
+
+ b.WriteString(commented)
+ for i, rr := range value {
+ if rr != '"' {
+ adjacentQuoteCount = 0
+ } else {
+ adjacentQuoteCount++
+ }
+ switch rr {
+ case '\b':
+ b.WriteString(`\b`)
+ case '\t':
+ b.WriteString("\t")
+ case '\n':
+ b.WriteString("\n" + commented)
+ case '\f':
+ b.WriteString(`\f`)
+ case '\r':
+ b.WriteString("\r")
+ case '"':
+ if adjacentQuoteCount >= 3 || i == len(value)-1 {
+ adjacentQuoteCount = 0
+ b.WriteString(`\"`)
+ } else {
+ b.WriteString(`"`)
+ }
+ case '\\':
+ b.WriteString(`\`)
+ default:
+ intRr := uint16(rr)
+ if intRr < 0x001F {
+ b.WriteString(fmt.Sprintf("\\u%0.4X", intRr))
+ } else {
+ b.WriteRune(rr)
+ }
+ }
+ }
+ return b.String()
+}
+
+// Encodes a string to a TOML-compliant string value
+func encodeTomlString(value string) string {
+ var b bytes.Buffer
+
+ for _, rr := range value {
+ switch rr {
+ case '\b':
+ b.WriteString(`\b`)
+ case '\t':
+ b.WriteString(`\t`)
+ case '\n':
+ b.WriteString(`\n`)
+ case '\f':
+ b.WriteString(`\f`)
+ case '\r':
+ b.WriteString(`\r`)
+ case '"':
+ b.WriteString(`\"`)
+ case '\\':
+ b.WriteString(`\\`)
+ default:
+ intRr := uint16(rr)
+ if intRr < 0x001F {
+ b.WriteString(fmt.Sprintf("\\u%0.4X", intRr))
+ } else {
+ b.WriteRune(rr)
+ }
+ }
+ }
+ return b.String()
+}
+
+func tomlTreeStringRepresentation(t *Tree, ord MarshalOrder) (string, error) {
+ var orderedVals []sortNode
+ switch ord {
+ case OrderPreserve:
+ orderedVals = sortByLines(t)
+ default:
+ orderedVals = sortAlphabetical(t)
+ }
+
+ var values []string
+ for _, node := range orderedVals {
+ k := node.key
+ v := t.values[k]
+
+ repr, err := tomlValueStringRepresentation(v, "", "", ord, false)
+ if err != nil {
+ return "", err
+ }
+ values = append(values, quoteKeyIfNeeded(k)+" = "+repr)
+ }
+ return "{ " + strings.Join(values, ", ") + " }", nil
+}
+
+func tomlValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) {
+	// v may arrive wrapped in a *tomlValue: writeToOrdered passes the wrapper through
+	// so that this function can see the formatting options attached to the value.
+ tv, ok := v.(*tomlValue)
+ if ok {
+ v = tv.value
+ } else {
+ tv = &tomlValue{}
+ }
+
+ switch value := v.(type) {
+ case uint64:
+ return strconv.FormatUint(value, 10), nil
+ case int64:
+ return strconv.FormatInt(value, 10), nil
+ case float64:
+ // Default bit length is full 64
+ bits := 64
+ // Float panics if nan is used
+ if !math.IsNaN(value) {
+ // if 32 bit accuracy is enough to exactly show, use 32
+ _, acc := big.NewFloat(value).Float32()
+ if acc == big.Exact {
+ bits = 32
+ }
+ }
+ if math.Trunc(value) == value {
+ return strings.ToLower(strconv.FormatFloat(value, 'f', 1, bits)), nil
+ }
+ return strings.ToLower(strconv.FormatFloat(value, 'f', -1, bits)), nil
+ case string:
+ if tv.multiline {
+ if tv.literal {
+ b := strings.Builder{}
+ b.WriteString("'''\n")
+ b.Write([]byte(value))
+ b.WriteString("\n'''")
+ return b.String(), nil
+ } else {
+ return "\"\"\"\n" + encodeMultilineTomlString(value, commented) + "\"\"\"", nil
+ }
+ }
+ return "\"" + encodeTomlString(value) + "\"", nil
+ case []byte:
+ b, _ := v.([]byte)
+ return string(b), nil
+ case bool:
+ if value {
+ return "true", nil
+ }
+ return "false", nil
+ case time.Time:
+ return value.Format(time.RFC3339), nil
+ case LocalDate:
+ return value.String(), nil
+ case LocalDateTime:
+ return value.String(), nil
+ case LocalTime:
+ return value.String(), nil
+ case *Tree:
+ return tomlTreeStringRepresentation(value, ord)
+ case nil:
+ return "", nil
+ }
+
+ rv := reflect.ValueOf(v)
+
+ if rv.Kind() == reflect.Slice {
+ var values []string
+ for i := 0; i < rv.Len(); i++ {
+ item := rv.Index(i).Interface()
+ itemRepr, err := tomlValueStringRepresentation(item, commented, indent, ord, arraysOneElementPerLine)
+ if err != nil {
+ return "", err
+ }
+ values = append(values, itemRepr)
+ }
+ if arraysOneElementPerLine && len(values) > 1 {
+ stringBuffer := bytes.Buffer{}
+ valueIndent := indent + ` ` // TODO: move that to a shared encoder state
+
+ stringBuffer.WriteString("[\n")
+
+ for _, value := range values {
+ stringBuffer.WriteString(valueIndent)
+ stringBuffer.WriteString(commented + value)
+ stringBuffer.WriteString(`,`)
+ stringBuffer.WriteString("\n")
+ }
+
+ stringBuffer.WriteString(indent + commented + "]")
+
+ return stringBuffer.String(), nil
+ }
+ return "[" + strings.Join(values, ", ") + "]", nil
+ }
+ return "", fmt.Errorf("unsupported value type %T: %v", v, v)
+}
+
+func getTreeArrayLine(trees []*Tree) (line int) {
+ // Prevent returning 0 for empty trees
+ line = int(^uint(0) >> 1)
+ // get lowest line number >= 0
+ for _, tv := range trees {
+ if tv.position.Line < line || line == 0 {
+ line = tv.position.Line
+ }
+ }
+ return
+}
+
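+// sortByLines returns the keys of t ordered by the line on which each value
+// appears in the source document, preserving the original layout.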
+func sortByLines(t *Tree) (vals []sortNode) {
+ var (
+ line int
+ lines []int
+ tv *Tree
+ tom *tomlValue
+ node sortNode
+ )
+ vals = make([]sortNode, 0)
+ m := make(map[int]sortNode)
+
+ for k := range t.values {
+ v := t.values[k]
+ switch v.(type) {
+ case *Tree:
+ tv = v.(*Tree)
+ line = tv.position.Line
+ node = sortNode{key: k, complexity: valueComplex}
+ case []*Tree:
+ line = getTreeArrayLine(v.([]*Tree))
+ node = sortNode{key: k, complexity: valueComplex}
+ default:
+ tom = v.(*tomlValue)
+ line = tom.position.Line
+ node = sortNode{key: k, complexity: valueSimple}
+ }
+ lines = append(lines, line)
+ vals = append(vals, node)
+ m[line] = node
+ }
+ sort.Ints(lines)
+
+ for i, line := range lines {
+ vals[i] = m[line]
+ }
+
+ return vals
+}
+
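+// sortAlphabetical returns the keys of t with simple values first and tables
+// after, each group sorted alphabetically.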
+func sortAlphabetical(t *Tree) (vals []sortNode) {
+ var (
+ node sortNode
+ simpVals []string
+ compVals []string
+ )
+ vals = make([]sortNode, 0)
+ m := make(map[string]sortNode)
+
+ for k := range t.values {
+ v := t.values[k]
+ switch v.(type) {
+ case *Tree, []*Tree:
+ node = sortNode{key: k, complexity: valueComplex}
+ compVals = append(compVals, node.key)
+ default:
+ node = sortNode{key: k, complexity: valueSimple}
+ simpVals = append(simpVals, node.key)
+ }
+ vals = append(vals, node)
+ m[node.key] = node
+ }
+
+ // Simples first to match previous implementation
+ sort.Strings(simpVals)
+ i := 0
+ for _, key := range simpVals {
+ vals[i] = m[key]
+ i++
+ }
+
+ sort.Strings(compVals)
+ for _, key := range compVals {
+ vals[i] = m[key]
+ i++
+ }
+
+ return vals
+}
+
+func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) {
+ return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical, " ", false, false)
+}
+
+func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord MarshalOrder, indentString string, compactComments, parentCommented bool) (int64, error) {
+ var orderedVals []sortNode
+
+ switch ord {
+ case OrderPreserve:
+ orderedVals = sortByLines(t)
+ default:
+ orderedVals = sortAlphabetical(t)
+ }
+
+ for _, node := range orderedVals {
+ switch node.complexity {
+ case valueComplex:
+ k := node.key
+ v := t.values[k]
+
+ combinedKey := quoteKeyIfNeeded(k)
+ if keyspace != "" {
+ combinedKey = keyspace + "." + combinedKey
+ }
+
+ switch node := v.(type) {
+ // node has to be of those two types given how keys are sorted above
+ case *Tree:
+ tv, ok := t.values[k].(*Tree)
+ if !ok {
+ return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
+ }
+ if tv.comment != "" {
+ comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1)
+ start := "# "
+ if strings.HasPrefix(comment, "#") {
+ start = ""
+ }
+ writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment)
+ bytesCount += int64(writtenBytesCountComment)
+ if errc != nil {
+ return bytesCount, errc
+ }
+ }
+
+ var commented string
+ if parentCommented || t.commented || tv.commented {
+ commented = "# "
+ }
+ writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n")
+ bytesCount += int64(writtenBytesCount)
+ if err != nil {
+ return bytesCount, err
+ }
+ bytesCount, err = node.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || tv.commented)
+ if err != nil {
+ return bytesCount, err
+ }
+ case []*Tree:
+ for _, subTree := range node {
+ var commented string
+ if parentCommented || t.commented || subTree.commented {
+ commented = "# "
+ }
+ writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n")
+ bytesCount += int64(writtenBytesCount)
+ if err != nil {
+ return bytesCount, err
+ }
+
+ bytesCount, err = subTree.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || subTree.commented)
+ if err != nil {
+ return bytesCount, err
+ }
+ }
+ }
+ default: // Simple
+ k := node.key
+ v, ok := t.values[k].(*tomlValue)
+ if !ok {
+ return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
+ }
+
+ var commented string
+ if parentCommented || t.commented || v.commented {
+ commented = "# "
+ }
+ repr, err := tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine)
+ if err != nil {
+ return bytesCount, err
+ }
+
+ if v.comment != "" {
+ comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1)
+ start := "# "
+ if strings.HasPrefix(comment, "#") {
+ start = ""
+ }
+ if !compactComments {
+ writtenBytesCountComment, errc := writeStrings(w, "\n")
+ bytesCount += int64(writtenBytesCountComment)
+ if errc != nil {
+ return bytesCount, errc
+ }
+ }
+ writtenBytesCountComment, errc := writeStrings(w, indent, start, comment, "\n")
+ bytesCount += int64(writtenBytesCountComment)
+ if errc != nil {
+ return bytesCount, errc
+ }
+ }
+
+ quotedKey := quoteKeyIfNeeded(k)
+ writtenBytesCount, err := writeStrings(w, indent, commented, quotedKey, " = ", repr, "\n")
+ bytesCount += int64(writtenBytesCount)
+ if err != nil {
+ return bytesCount, err
+ }
+ }
+ }
+
+ return bytesCount, nil
+}
+
+// quote a key if it does not fit the bare key format (A-Za-z0-9_-)
+// quoted keys use the same rules as strings
+func quoteKeyIfNeeded(k string) string {
+ // when encoding a map with the 'quoteMapKeys' option enabled, the tree will contain
+ // keys that have already been quoted.
+ // not an ideal situation, but good enough of a stop gap.
+ if len(k) >= 2 && k[0] == '"' && k[len(k)-1] == '"' {
+ return k
+ }
+ isBare := true
+ for _, r := range k {
+ if !isValidBareChar(r) {
+ isBare = false
+ break
+ }
+ }
+ if isBare {
+ return k
+ }
+ return quoteKey(k)
+}
+
+func quoteKey(k string) string {
+ return "\"" + encodeTomlString(k) + "\""
+}
+
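+// writeStrings writes each string to w in order and returns the total number of bytes written.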
+func writeStrings(w io.Writer, s ...string) (int, error) {
+ var n int
+ for i := range s {
+ b, err := io.WriteString(w, s[i])
+ n += b
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+}
+
+// WriteTo encode the Tree as Toml and writes it to the writer w.
+// Returns the number of bytes written in case of success, or an error if anything happened.
+func (t *Tree) WriteTo(w io.Writer) (int64, error) {
+ return t.writeTo(w, "", "", 0, false)
+}
+
+// ToTomlString generates a human-readable representation of the current tree.
+// Output spans multiple lines, and is suitable for ingest by a TOML parser.
+// If the conversion cannot be performed, ToTomlString returns a non-nil error.
+func (t *Tree) ToTomlString() (string, error) {
+ b, err := t.Marshal()
+ if err != nil {
+ return "", err
+ }
+ return string(b), nil
+}
+
+// String generates a human-readable representation of the current tree.
+// Alias of ToTomlString. Present to implement the fmt.Stringer interface.
+func (t *Tree) String() string {
+ result, _ := t.ToTomlString()
+ return result
+}
+
+// ToMap recursively generates a representation of the tree using Go built-in structures.
+// The following types are used:
+//
+// * bool
+// * float64
+// * int64
+// * string
+// * uint64
+// * time.Time
+// * map[string]interface{} (where interface{} is any of this list)
+// * []interface{} (where interface{} is any of this list)
+func (t *Tree) ToMap() map[string]interface{} {
+ result := map[string]interface{}{}
+
+ for k, v := range t.values {
+ switch node := v.(type) {
+ case []*Tree:
+ var array []interface{}
+ for _, item := range node {
+ array = append(array, item.ToMap())
+ }
+ result[k] = array
+ case *Tree:
+ result[k] = node.ToMap()
+ case *tomlValue:
+ result[k] = tomlValueToGo(node.value)
+ }
+ }
+ return result
+}
+
+func tomlValueToGo(v interface{}) interface{} {
+ if tree, ok := v.(*Tree); ok {
+ return tree.ToMap()
+ }
+
+ rv := reflect.ValueOf(v)
+
+ if rv.Kind() != reflect.Slice {
+ return v
+ }
+ values := make([]interface{}, rv.Len())
+ for i := 0; i < rv.Len(); i++ {
+ item := rv.Index(i).Interface()
+ values[i] = tomlValueToGo(item)
+ }
+ return values
+}
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_writepub.go b/vendor/github.com/pelletier/go-toml/tomltree_writepub.go
new file mode 100644
index 000000000..fa326308c
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/tomltree_writepub.go
@@ -0,0 +1,6 @@
+package toml
+
+// ValueStringRepresentation transforms an interface{} value into its toml string representation.
+func ValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) {
+ return tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine)
+}
diff --git a/vendor/github.com/spf13/afero/.gitignore b/vendor/github.com/spf13/afero/.gitignore
new file mode 100644
index 000000000..9c1d98611
--- /dev/null
+++ b/vendor/github.com/spf13/afero/.gitignore
@@ -0,0 +1,2 @@
+sftpfs/file1
+sftpfs/test/
diff --git a/vendor/github.com/spf13/afero/.travis.yml b/vendor/github.com/spf13/afero/.travis.yml
new file mode 100644
index 000000000..e944f5947
--- /dev/null
+++ b/vendor/github.com/spf13/afero/.travis.yml
@@ -0,0 +1,26 @@
+sudo: false
+language: go
+arch:
+ - amd64
+ - ppc64le
+
+go:
+ - "1.14"
+ - "1.15"
+ - "1.16"
+ - tip
+
+os:
+ - linux
+ - osx
+
+matrix:
+ allow_failures:
+ - go: tip
+ fast_finish: true
+
+script:
+ - go build -v ./...
+ - go test -count=1 -cover -race -v ./...
+ - go vet ./...
+ - FILES=$(gofmt -s -l . zipfs sftpfs mem tarfs); if [[ -n "${FILES}" ]]; then echo "You have go format errors; gofmt your changes"; exit 1; fi
diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt
new file mode 100644
index 000000000..298f0e266
--- /dev/null
+++ b/vendor/github.com/spf13/afero/LICENSE.txt
@@ -0,0 +1,174 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md
new file mode 100644
index 000000000..fb8eaaf89
--- /dev/null
+++ b/vendor/github.com/spf13/afero/README.md
@@ -0,0 +1,430 @@
+![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png)
+
+A FileSystem Abstraction System for Go
+
+[![Build Status](https://travis-ci.org/spf13/afero.svg)](https://travis-ci.org/spf13/afero) [![Build status](https://ci.appveyor.com/api/projects/status/github/spf13/afero?branch=master&svg=true)](https://ci.appveyor.com/project/spf13/afero) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+# Overview
+
+Afero is a filesystem framework providing a simple, uniform and universal API
+for interacting with any filesystem: an abstraction layer providing interfaces,
+types and methods. Afero has an exceptionally clean interface and simple design,
+without needless constructors or initialization methods.
+
+Afero is also a library providing a base set of interoperable backend
+filesystems that make it easy to work with afero while retaining all the power
+and benefit of the os and ioutil packages.
+
+Afero provides significant improvements over using the os package alone, most
+notably the ability to create mock and testing filesystems without relying on the disk.
+
+It is suitable for use in any situation where you would consider using the OS
+package as it provides an additional abstraction that makes it easy to use a
+memory backed file system during testing. It also adds support for the http
+filesystem for full interoperability.
+
+
+## Afero Features
+
+* A single consistent API for accessing a variety of filesystems
+* Interoperation between a variety of file system types
+* A set of interfaces to encourage and enforce interoperability between backends
+* An atomic cross platform memory backed file system
+* Support for compositional (union) file systems by combining multiple file systems acting as one
+* Specialized backends which modify existing filesystems (Read Only, Regexp filtered)
+* A set of utility functions ported from io, ioutil & hugo to be afero aware
+* Wrapper for go 1.16 filesystem abstraction `io/fs.FS`
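+
+As a brief sketch of that last point, any afero backend can be wrapped so that
+it satisfies the standard library's `io/fs.FS` (Go 1.16+). `NewIOFS` is part of
+this package; the variable names below are just illustrative:
+
+```go
+appFs := afero.NewMemMapFs()           // any afero.Fs backend
+var stdFs fs.FS = afero.NewIOFS(appFs) // satisfies io/fs.FS, fs.ReadDirFS, fs.StatFS, ...
+```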
+
+# Using Afero
+
+Afero is easy to use and easier to adopt.
+
+A few different ways you could use Afero:
+
+* Use the interfaces alone to define your own file system.
+* Use it as a wrapper for the OS packages.
+* Define different filesystems for different parts of your application.
+* Use Afero for mock filesystems while testing.
+
+## Step 1: Install Afero
+
+First use go get to install the latest version of the library.
+
+ $ go get github.com/spf13/afero
+
+Next include Afero in your application.
+```go
+import "github.com/spf13/afero"
+```
+
+## Step 2: Declare a backend
+
+First define a package variable and set it to a filesystem backend.
+```go
+var AppFs = afero.NewMemMapFs()
+```
+or
+```go
+var AppFs = afero.NewOsFs()
+```
+It is important to note that if you repeat the composite literal you
+will be using a completely new and isolated filesystem. In the case of
+OsFs it will still use the same underlying filesystem but will reduce
+the ability to drop in other filesystems as desired.
+
+## Step 3: Use it like you would the OS package
+
+Throughout your application use any function and method like you normally
+would.
+
+So if your application previously had:
+```go
+os.Open("/tmp/foo")
+```
+you would replace it with:
+```go
+AppFs.Open("/tmp/foo")
+```
+
+`AppFs` being the variable we defined above.
+
+
+## List of all available functions
+
+File System Methods Available:
+```go
+Chmod(name string, mode os.FileMode) : error
+Chown(name string, uid, gid int) : error
+Chtimes(name string, atime time.Time, mtime time.Time) : error
+Create(name string) : File, error
+Mkdir(name string, perm os.FileMode) : error
+MkdirAll(path string, perm os.FileMode) : error
+Name() : string
+Open(name string) : File, error
+OpenFile(name string, flag int, perm os.FileMode) : File, error
+Remove(name string) : error
+RemoveAll(path string) : error
+Rename(oldname, newname string) : error
+Stat(name string) : os.FileInfo, error
+```
+File Interfaces and Methods Available:
+```go
+io.Closer
+io.Reader
+io.ReaderAt
+io.Seeker
+io.Writer
+io.WriterAt
+
+Name() : string
+Readdir(count int) : []os.FileInfo, error
+Readdirnames(n int) : []string, error
+Stat() : os.FileInfo, error
+Sync() : error
+Truncate(size int64) : error
+WriteString(s string) : ret int, err error
+```
+In some applications it may make sense to define a new package that
+simply exports the file system variable for easy access from anywhere.
+
+## Using Afero's utility functions
+
+Afero provides a set of functions to make it easier to use the underlying file systems.
+These functions have been primarily ported from io & ioutil with some developed for Hugo.
+
+The afero utilities support all afero compatible backends.
+
+The list of utilities includes:
+
+```go
+DirExists(path string) (bool, error)
+Exists(path string) (bool, error)
+FileContainsBytes(filename string, subslice []byte) (bool, error)
+GetTempDir(subPath string) string
+IsDir(path string) (bool, error)
+IsEmpty(path string) (bool, error)
+ReadDir(dirname string) ([]os.FileInfo, error)
+ReadFile(filename string) ([]byte, error)
+SafeWriteReader(path string, r io.Reader) (err error)
+TempDir(dir, prefix string) (name string, err error)
+TempFile(dir, prefix string) (f File, err error)
+Walk(root string, walkFn filepath.WalkFunc) error
+WriteFile(filename string, data []byte, perm os.FileMode) error
+WriteReader(path string, r io.Reader) (err error)
+```
+For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero)
+
+There are two different ways to use them. You can either call them directly,
+where the first parameter of each function is the file system, or you can
+declare a new `Afero`, a custom type used to bind these functions as methods
+to a given filesystem.
+
+### Calling utilities directly
+
+```go
+fs := new(afero.MemMapFs)
+f, err := afero.TempFile(fs, "", "ioutil-test")
+
+```
+
+### Calling via Afero
+
+```go
+fs := afero.NewMemMapFs()
+afs := &afero.Afero{Fs: fs}
+f, err := afs.TempFile("", "ioutil-test")
+```
+
+## Using Afero for Testing
+
+There is a large benefit to using a mock filesystem for testing. It has a
+completely blank state every time it is initialized and is easily reproducible
+regardless of OS. You could create files to your heart’s content
+and the file access would be fast while also saving you from all the annoying
+issues with deleting temporary files, Windows file locking, etc. The MemMapFs
+backend is perfect for testing.
+
+* Much faster than performing I/O operations on disk
+* Avoid security issues and permissions
+* Far more control. 'rm -rf /' with confidence
+* Test setup is far easier to do
+* No test cleanup needed
+
+One way to accomplish this is to define a variable as mentioned above.
+In your application this will be set to afero.NewOsFs(); during testing you
+can set it to afero.NewMemMapFs().
+
+It wouldn't be uncommon to have each test initialize a blank slate memory
+backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere
+appropriate in my application code. This approach ensures that tests are order
+independent, with no test relying on the state left by an earlier test.
+
+Then in my tests I would initialize a new MemMapFs for each test:
+```go
+func TestExist(t *testing.T) {
+ appFS := afero.NewMemMapFs()
+ // create test files and directories
+ appFS.MkdirAll("src/a", 0755)
+ afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644)
+ afero.WriteFile(appFS, "src/c", []byte("file c"), 0644)
+ name := "src/c"
+ _, err := appFS.Stat(name)
+ if os.IsNotExist(err) {
+ t.Errorf("file \"%s\" does not exist.\n", name)
+ }
+}
+```
+
+# Available Backends
+
+## Operating System Native
+
+### OsFs
+
+The first is simply a wrapper around the native OS calls. This makes it
+very easy to use as all of the calls are the same as the existing OS
+calls. It also makes it trivial to have your code use the OS during
+operation and a mock filesystem during testing or as needed.
+
+```go
+appfs := afero.NewOsFs()
+appfs.MkdirAll("src/a", 0755)
+```
+
+## Memory Backed Storage
+
+### MemMapFs
+
+Afero also provides a fully atomic memory backed filesystem, perfect for use in
+mocking and for avoiding unnecessary disk I/O when persistence isn’t
+necessary. It is fully concurrent and will work safely within goroutines.
+
+```go
+mm := afero.NewMemMapFs()
+mm.MkdirAll("src/a", 0755)
+```
+
+#### InMemoryFile
+
+As part of MemMapFs, Afero also provides an atomic, fully concurrent memory
+backed file implementation. This can be used in other memory backed file
+systems with ease. Plans are to add a radix tree memory stored file
+system using InMemoryFile.
+
+## Network Interfaces
+
+### SftpFs
+
+Afero has experimental support for the secure file transfer protocol (SFTP), which
+can be used to perform file operations over an encrypted channel.
+
+## Filtering Backends
+
+### BasePathFs
+
+The BasePathFs restricts all operations to a given path within an Fs.
+File names passed to operations on this Fs will be prepended with the base
+path before the call is forwarded to the source Fs.
+
+```go
+bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path")
+```
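+
+As an illustrative sketch (the file name below is just an example), operations
+on `bp` are translated onto the base path before they reach the source Fs:
+
+```go
+f, err := bp.Create("/file.txt") // actually creates /base/path/file.txt on the underlying OsFs
+```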
+
+### ReadOnlyFs
+
+A thin wrapper around the source Fs providing a read only view.
+
+```go
+fs := afero.NewReadOnlyFs(afero.NewOsFs())
+_, err := fs.Create("/file.txt")
+// err = syscall.EPERM
+```
+
+### RegexpFs
+
+A filtered view on file names: any file NOT matching
+the passed regexp will be treated as non-existing.
+Files not matching the regexp provided will not be created.
+Directories are not filtered.
+
+```go
+fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`))
+_, err := fs.Create("/file.html")
+// err = syscall.ENOENT
+```
+
+### HttpFs
+
+Afero provides an http compatible backend which can wrap any of the existing
+backends.
+
+The http package requires a slightly different version of Open, one which
+returns an http.File type.
+
+Afero provides an httpFs file system which satisfies this requirement.
+Any Afero FileSystem can be used as an httpFs.
+
+```go
+httpFs := afero.NewHttpFs(afero.NewOsFs()) // wrap any existing afero backend
+fileserver := http.FileServer(httpFs.Dir("/static")) // "/static" is an example path
+http.Handle("/", fileserver)
+```
+
+## Composite Backends
+
+Afero provides the ability to have two (or more) filesystems act as a single
+file system.
+
+### CacheOnReadFs
+
+The CacheOnReadFs will lazily make copies of any accessed files from the base
+layer into the overlay. Subsequent reads will be pulled from the overlay
+directly, provided the request falls within the cache duration of when the copy
+was created in the overlay.
+
+If the base filesystem is writeable, any changes to files will be
+made first to the base, then to the overlay layer. Write calls to open file
+handles like `Write()` or `Truncate()` go to the overlay first.
+
+To write files to the overlay only, use the overlay Fs directly (not
+via the union Fs).
+
+Files are cached in the layer for the given time.Duration; a cache duration of 0
+means "forever", meaning the file will never be re-requested from the base.
+
+A read-only base will make the overlay also read-only but still copy files
+from the base to the overlay when they're not present (or outdated) in the
+caching layer.
+
+```go
+base := afero.NewOsFs()
+layer := afero.NewMemMapFs()
+ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second)
+```
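+
+Reads then go through the union as described above. As a sketch (the file name
+is only an example), the first read copies the file into the memory layer, and
+later reads within the cache duration are served from there:
+
+```go
+data, err := afero.ReadFile(ufs, "/etc/hostname")
+```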
+
+### CopyOnWriteFs
+
+The CopyOnWriteFs is a read only base file system with a potentially
+writeable layer on top.
+
+Read operations will first look in the overlay and if not found there, will
+serve the file from the base.
+
+Changes to the file system will only be made in the overlay.
+
+Any attempt to modify a file found only in the base will copy the file to the
+overlay layer before modification (including opening a file with a writable
+handle).
+
+Removing and Renaming files present only in the base layer is not currently
+permitted. If a file is present in the base layer and the overlay, only the
+overlay will be removed/renamed.
+
+```go
+base := afero.NewOsFs()
+roBase := afero.NewReadOnlyFs(base)
+ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs())
+
+fh, _ := ufs.Create("/home/test/file2.txt")
+fh.WriteString("This is a test")
+fh.Close()
+```
+
+In this example all write operations will only occur in memory (MemMapFs)
+leaving the base filesystem (OsFs) untouched.
+
+
+## Desired/possible backends
+
+The following is a short list of possible backends we hope someone will
+implement:
+
+* SSH
+* S3
+
+# About the project
+
+## What's in the name
+
+Afero comes from the Latin roots Ad-Facere.
+
+**"Ad"** is a prefix meaning "to".
+
+**"Facere"** is a form of the root "faciō" making "make or do".
+
+The literal meaning of afero is "to make" or "to do" which seems very fitting
+for a library that allows one to make files and directories and do things with them.
+
+The English word that shares the same roots as Afero is "affair". Affair shares
+the same concept but as a noun it means "something that is made or done" or "an
+object of a particular type".
+
+It's also nice that unlike some of my other libraries (hugo, cobra, viper) it
+Googles very well.
+
+## Release Notes
+
+See the [Releases Page](https://github.com/spf13/afero/releases).
+
+## Contributing
+
+1. Fork it
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Commit your changes (`git commit -am 'Add some feature'`)
+4. Push to the branch (`git push origin my-new-feature`)
+5. Create new Pull Request
+
+## Contributors
+
+Names in no particular order:
+
+* [spf13](https://github.com/spf13)
+* [jaqx0r](https://github.com/jaqx0r)
+* [mbertschler](https://github.com/mbertschler)
+* [xor-gate](https://github.com/xor-gate)
+
+## License
+
+Afero is released under the Apache 2.0 license. See
+[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt)
diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go
new file mode 100644
index 000000000..469ff7d2d
--- /dev/null
+++ b/vendor/github.com/spf13/afero/afero.go
@@ -0,0 +1,111 @@
+// Copyright © 2014 Steve Francia .
+// Copyright 2013 tsuru authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package afero provides types and methods for interacting with the filesystem,
+// as an abstraction layer.
+//
+// Afero also provides a few implementations that are mostly interoperable. One that
+// uses the operating system filesystem, one that uses memory to store files
+// (cross platform) and an interface that should be implemented if you want to
+// provide your own filesystem.
+package afero
+
+import (
+ "errors"
+ "io"
+ "os"
+ "time"
+)
+
+type Afero struct {
+ Fs
+}
+
+// File represents a file in the filesystem.
+type File interface {
+ io.Closer
+ io.Reader
+ io.ReaderAt
+ io.Seeker
+ io.Writer
+ io.WriterAt
+
+ Name() string
+ Readdir(count int) ([]os.FileInfo, error)
+ Readdirnames(n int) ([]string, error)
+ Stat() (os.FileInfo, error)
+ Sync() error
+ Truncate(size int64) error
+ WriteString(s string) (ret int, err error)
+}
+
+// Fs is the filesystem interface.
+//
+// Any simulated or real filesystem should implement this interface.
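+//
+// A sketch of typical usage against any backend (both constructors are
+// provided elsewhere in this package; the path is just an example):
+//
+//	var appFs Fs = NewMemMapFs() // or NewOsFs()
+//	f, err := appFs.Create("/tmp/example.txt")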
+type Fs interface {
+ // Create creates a file in the filesystem, returning the file and an
+ // error, if any happens.
+ Create(name string) (File, error)
+
+	// Mkdir creates a directory in the filesystem, returning an error if any
+	// happens.
+ Mkdir(name string, perm os.FileMode) error
+
+	// MkdirAll creates a directory path and all parents that do not exist
+	// yet.
+ MkdirAll(path string, perm os.FileMode) error
+
+ // Open opens a file, returning it or an error, if any happens.
+ Open(name string) (File, error)
+
+ // OpenFile opens a file using the given flags and the given mode.
+ OpenFile(name string, flag int, perm os.FileMode) (File, error)
+
+ // Remove removes a file identified by name, returning an error, if any
+ // happens.
+ Remove(name string) error
+
+	// RemoveAll removes a directory path and any children it contains. It
+	// does not fail if the path does not exist (returns nil).
+ RemoveAll(path string) error
+
+ // Rename renames a file.
+ Rename(oldname, newname string) error
+
+ // Stat returns a FileInfo describing the named file, or an error, if any
+ // happens.
+ Stat(name string) (os.FileInfo, error)
+
+ // The name of this FileSystem
+ Name() string
+
+ // Chmod changes the mode of the named file to mode.
+ Chmod(name string, mode os.FileMode) error
+
+ // Chown changes the uid and gid of the named file.
+ Chown(name string, uid, gid int) error
+
+	// Chtimes changes the access and modification times of the named file.
+ Chtimes(name string, atime time.Time, mtime time.Time) error
+}
+
+var (
+ ErrFileClosed = errors.New("File is closed")
+ ErrOutOfRange = errors.New("Out of range")
+ ErrTooLarge = errors.New("Too large")
+ ErrFileNotFound = os.ErrNotExist
+ ErrFileExists = os.ErrExist
+ ErrDestinationExists = os.ErrExist
+)
diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml
new file mode 100644
index 000000000..5d2f34bf1
--- /dev/null
+++ b/vendor/github.com/spf13/afero/appveyor.yml
@@ -0,0 +1,15 @@
+version: '{build}'
+clone_folder: C:\gopath\src\github.com\spf13\afero
+environment:
+ GOPATH: C:\gopath
+build_script:
+- cmd: >-
+ go version
+
+ go env
+
+ go get -v github.com/spf13/afero/...
+
+ go build -v github.com/spf13/afero/...
+test_script:
+- cmd: go test -count=1 -cover -race -v github.com/spf13/afero/...
diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go
new file mode 100644
index 000000000..4f9832829
--- /dev/null
+++ b/vendor/github.com/spf13/afero/basepath.go
@@ -0,0 +1,211 @@
+package afero
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "time"
+)
+
+var _ Lstater = (*BasePathFs)(nil)
+
+// The BasePathFs restricts all operations to a given path within an Fs.
+// The given file name to the operations on this Fs will be prepended with
+// the base path before calling the base Fs.
+// Any file name (after filepath.Clean()) outside this base path will be
+// treated as a non-existing file.
+//
+// Note that it does not clean the error messages on return, so you may
+// reveal the real path on errors.
+type BasePathFs struct {
+ source Fs
+ path string
+}
+
+type BasePathFile struct {
+ File
+ path string
+}
+
+func (f *BasePathFile) Name() string {
+ sourcename := f.File.Name()
+ return strings.TrimPrefix(sourcename, filepath.Clean(f.path))
+}
+
+func NewBasePathFs(source Fs, path string) Fs {
+ return &BasePathFs{source: source, path: path}
+}
+
+// RealPath returns the given file name with the base path prepended;
+// for a file outside the base path it returns the given file name and an error.
+func (b *BasePathFs) RealPath(name string) (path string, err error) {
+ if err := validateBasePathName(name); err != nil {
+ return name, err
+ }
+
+ bpath := filepath.Clean(b.path)
+ path = filepath.Clean(filepath.Join(bpath, name))
+ if !strings.HasPrefix(path, bpath) {
+ return name, os.ErrNotExist
+ }
+
+ return path, nil
+}
+
+func validateBasePathName(name string) error {
+ if runtime.GOOS != "windows" {
+ // Not much to do here;
+ // the virtual file paths all look absolute on *nix.
+ return nil
+ }
+
+ // On Windows a common mistake would be to provide an absolute OS path
+ // We could strip out the base part, but that would not be very portable.
+ if filepath.IsAbs(name) {
+ return os.ErrNotExist
+ }
+
+ return nil
+}
+
+func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "chtimes", Path: name, Err: err}
+ }
+ return b.source.Chtimes(name, atime, mtime)
+}
+
+func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "chmod", Path: name, Err: err}
+ }
+ return b.source.Chmod(name, mode)
+}
+
+func (b *BasePathFs) Chown(name string, uid, gid int) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "chown", Path: name, Err: err}
+ }
+ return b.source.Chown(name, uid, gid)
+}
+
+func (b *BasePathFs) Name() string {
+ return "BasePathFs"
+}
+
+func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return nil, &os.PathError{Op: "stat", Path: name, Err: err}
+ }
+ return b.source.Stat(name)
+}
+
+func (b *BasePathFs) Rename(oldname, newname string) (err error) {
+ if oldname, err = b.RealPath(oldname); err != nil {
+ return &os.PathError{Op: "rename", Path: oldname, Err: err}
+ }
+ if newname, err = b.RealPath(newname); err != nil {
+ return &os.PathError{Op: "rename", Path: newname, Err: err}
+ }
+ return b.source.Rename(oldname, newname)
+}
+
+func (b *BasePathFs) RemoveAll(name string) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "remove_all", Path: name, Err: err}
+ }
+ return b.source.RemoveAll(name)
+}
+
+func (b *BasePathFs) Remove(name string) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "remove", Path: name, Err: err}
+ }
+ return b.source.Remove(name)
+}
+
+func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return nil, &os.PathError{Op: "openfile", Path: name, Err: err}
+ }
+ sourcef, err := b.source.OpenFile(name, flag, mode)
+ if err != nil {
+ return nil, err
+ }
+ return &BasePathFile{sourcef, b.path}, nil
+}
+
+func (b *BasePathFs) Open(name string) (f File, err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return nil, &os.PathError{Op: "open", Path: name, Err: err}
+ }
+ sourcef, err := b.source.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ return &BasePathFile{File: sourcef, path: b.path}, nil
+}
+
+func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "mkdir", Path: name, Err: err}
+ }
+ return b.source.Mkdir(name, mode)
+}
+
+func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "mkdir", Path: name, Err: err}
+ }
+ return b.source.MkdirAll(name, mode)
+}
+
+func (b *BasePathFs) Create(name string) (f File, err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return nil, &os.PathError{Op: "create", Path: name, Err: err}
+ }
+ sourcef, err := b.source.Create(name)
+ if err != nil {
+ return nil, err
+ }
+ return &BasePathFile{File: sourcef, path: b.path}, nil
+}
+
+func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
+ name, err := b.RealPath(name)
+ if err != nil {
+ return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err}
+ }
+ if lstater, ok := b.source.(Lstater); ok {
+ return lstater.LstatIfPossible(name)
+ }
+ fi, err := b.source.Stat(name)
+ return fi, false, err
+}
+
+func (b *BasePathFs) SymlinkIfPossible(oldname, newname string) error {
+ oldname, err := b.RealPath(oldname)
+ if err != nil {
+ return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err}
+ }
+ newname, err = b.RealPath(newname)
+ if err != nil {
+ return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err}
+ }
+ if linker, ok := b.source.(Linker); ok {
+ return linker.SymlinkIfPossible(oldname, newname)
+ }
+ return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink}
+}
+
+func (b *BasePathFs) ReadlinkIfPossible(name string) (string, error) {
+ name, err := b.RealPath(name)
+ if err != nil {
+ return "", &os.PathError{Op: "readlink", Path: name, Err: err}
+ }
+ if reader, ok := b.source.(LinkReader); ok {
+ return reader.ReadlinkIfPossible(name)
+ }
+ return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink}
+}
diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go
new file mode 100644
index 000000000..71471aa25
--- /dev/null
+++ b/vendor/github.com/spf13/afero/cacheOnReadFs.go
@@ -0,0 +1,311 @@
+package afero
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+// If the cache duration is 0, cache time will be unlimited, i.e. once
+// a file is in the layer, the base will never be read again for this file.
+//
+// For cache times greater than 0, the modification time of a file is
+// checked. Note that a lot of file system implementations only allow a
+// resolution of a second for timestamps... or as the godoc for os.Chtimes()
+// states: "The underlying filesystem may truncate or round the values to a
+// less precise time unit."
+//
+// This caching union will forward all write calls also to the base file
+// system first. To prevent writing to the base Fs, wrap it in a read-only
+// filter - Note: this will also make the overlay read-only, for writing files
+// in the overlay, use the overlay Fs directly, not via the union Fs.
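+//
+// A sketch of typical construction (the cache duration is just an example):
+//
+//	base := NewOsFs()
+//	layer := NewMemMapFs()
+//	ufs := NewCacheOnReadFs(base, layer, 100*time.Second)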
+type CacheOnReadFs struct {
+ base Fs
+ layer Fs
+ cacheTime time.Duration
+}
+
+func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs {
+ return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime}
+}
+
+type cacheState int
+
+const (
+ // not present in the overlay, unknown if it exists in the base:
+ cacheMiss cacheState = iota
+ // present in the overlay and in base, base file is newer:
+ cacheStale
+ // present in the overlay - with cache time == 0 it may exist in the base,
+ // with cacheTime > 0 it exists in the base and is same age or newer in the
+ // overlay
+ cacheHit
+ // happens if someone writes directly to the overlay without
+ // going through this union
+ cacheLocal
+)
+
+func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
+ var lfi, bfi os.FileInfo
+ lfi, err = u.layer.Stat(name)
+ if err == nil {
+ if u.cacheTime == 0 {
+ return cacheHit, lfi, nil
+ }
+ if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
+ bfi, err = u.base.Stat(name)
+ if err != nil {
+ return cacheLocal, lfi, nil
+ }
+ if bfi.ModTime().After(lfi.ModTime()) {
+ return cacheStale, bfi, nil
+ }
+ }
+ return cacheHit, lfi, nil
+ }
+
+ if err == syscall.ENOENT || os.IsNotExist(err) {
+ return cacheMiss, nil, nil
+ }
+
+ return cacheMiss, nil, err
+}
+
+func (u *CacheOnReadFs) copyToLayer(name string) error {
+ return copyToLayer(u.base, u.layer, name)
+}
+
+func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit:
+ err = u.base.Chtimes(name, atime, mtime)
+ case cacheStale, cacheMiss:
+ if err := u.copyToLayer(name); err != nil {
+ return err
+ }
+ err = u.base.Chtimes(name, atime, mtime)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.Chtimes(name, atime, mtime)
+}
+
+func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit:
+ err = u.base.Chmod(name, mode)
+ case cacheStale, cacheMiss:
+ if err := u.copyToLayer(name); err != nil {
+ return err
+ }
+ err = u.base.Chmod(name, mode)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.Chmod(name, mode)
+}
+
+func (u *CacheOnReadFs) Chown(name string, uid, gid int) error {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit:
+ err = u.base.Chown(name, uid, gid)
+ case cacheStale, cacheMiss:
+ if err := u.copyToLayer(name); err != nil {
+ return err
+ }
+ err = u.base.Chown(name, uid, gid)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.Chown(name, uid, gid)
+}
+
+func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) {
+ st, fi, err := u.cacheStatus(name)
+ if err != nil {
+ return nil, err
+ }
+ switch st {
+ case cacheMiss:
+ return u.base.Stat(name)
+ default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo
+ return fi, nil
+ }
+}
+
+func (u *CacheOnReadFs) Rename(oldname, newname string) error {
+ st, _, err := u.cacheStatus(oldname)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit:
+ err = u.base.Rename(oldname, newname)
+ case cacheStale, cacheMiss:
+ if err := u.copyToLayer(oldname); err != nil {
+ return err
+ }
+ err = u.base.Rename(oldname, newname)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.Rename(oldname, newname)
+}
+
+func (u *CacheOnReadFs) Remove(name string) error {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit, cacheStale, cacheMiss:
+ err = u.base.Remove(name)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.Remove(name)
+}
+
+func (u *CacheOnReadFs) RemoveAll(name string) error {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit, cacheStale, cacheMiss:
+ err = u.base.RemoveAll(name)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.RemoveAll(name)
+}
+
+func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return nil, err
+ }
+ switch st {
+ case cacheLocal, cacheHit:
+ default:
+ if err := u.copyToLayer(name); err != nil {
+ return nil, err
+ }
+ }
+ if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
+ bfi, err := u.base.OpenFile(name, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ lfi, err := u.layer.OpenFile(name, flag, perm)
+ if err != nil {
+ bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...?
+ return nil, err
+ }
+ return &UnionFile{Base: bfi, Layer: lfi}, nil
+ }
+ return u.layer.OpenFile(name, flag, perm)
+}
+
+func (u *CacheOnReadFs) Open(name string) (File, error) {
+ st, fi, err := u.cacheStatus(name)
+ if err != nil {
+ return nil, err
+ }
+
+ switch st {
+ case cacheLocal:
+ return u.layer.Open(name)
+
+ case cacheMiss:
+ bfi, err := u.base.Stat(name)
+ if err != nil {
+ return nil, err
+ }
+ if bfi.IsDir() {
+ return u.base.Open(name)
+ }
+ if err := u.copyToLayer(name); err != nil {
+ return nil, err
+ }
+ return u.layer.Open(name)
+
+ case cacheStale:
+ if !fi.IsDir() {
+ if err := u.copyToLayer(name); err != nil {
+ return nil, err
+ }
+ return u.layer.Open(name)
+ }
+ case cacheHit:
+ if !fi.IsDir() {
+ return u.layer.Open(name)
+ }
+ }
+ // the dirs from cacheHit, cacheStale fall down here:
+ bfile, _ := u.base.Open(name)
+ lfile, err := u.layer.Open(name)
+ if err != nil && bfile == nil {
+ return nil, err
+ }
+ return &UnionFile{Base: bfile, Layer: lfile}, nil
+}
+
+func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error {
+ err := u.base.Mkdir(name, perm)
+ if err != nil {
+ return err
+ }
+ return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache
+}
+
+func (u *CacheOnReadFs) Name() string {
+ return "CacheOnReadFs"
+}
+
+func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error {
+ err := u.base.MkdirAll(name, perm)
+ if err != nil {
+ return err
+ }
+ return u.layer.MkdirAll(name, perm)
+}
+
+func (u *CacheOnReadFs) Create(name string) (File, error) {
+ bfh, err := u.base.Create(name)
+ if err != nil {
+ return nil, err
+ }
+ lfh, err := u.layer.Create(name)
+ if err != nil {
+ // oops, see comment about OS_TRUNC above, should we remove? then we have to
+ // remember if the file did not exist before
+ bfh.Close()
+ return nil, err
+ }
+ return &UnionFile{Base: bfh, Layer: lfh}, nil
+}
diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go
new file mode 100644
index 000000000..18b45824b
--- /dev/null
+++ b/vendor/github.com/spf13/afero/const_bsds.go
@@ -0,0 +1,22 @@
+// Copyright © 2016 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build aix darwin openbsd freebsd netbsd dragonfly
+
+package afero
+
+import (
+ "syscall"
+)
+
+const BADFD = syscall.EBADF
diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go
new file mode 100644
index 000000000..2b850e4dd
--- /dev/null
+++ b/vendor/github.com/spf13/afero/const_win_unix.go
@@ -0,0 +1,26 @@
+// Copyright © 2016 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// +build !darwin
+// +build !openbsd
+// +build !freebsd
+// +build !dragonfly
+// +build !netbsd
+// +build !aix
+
+package afero
+
+import (
+ "syscall"
+)
+
+const BADFD = syscall.EBADFD
diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go
new file mode 100644
index 000000000..6ff8f3099
--- /dev/null
+++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go
@@ -0,0 +1,326 @@
+package afero
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "syscall"
+ "time"
+)
+
+var _ Lstater = (*CopyOnWriteFs)(nil)
+
+// The CopyOnWriteFs is a union filesystem: a read only base file system with
+// a possibly writeable layer on top. Changes to the file system will only
+// be made in the overlay: Changing an existing file in the base layer which
+// is not present in the overlay will copy the file to the overlay ("changing"
+// includes also calls to e.g. Chtimes(), Chmod() and Chown()).
+//
+// Reading directories is currently only supported via Open(), not OpenFile().
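+//
+// A sketch of typical construction:
+//
+//	roBase := NewReadOnlyFs(NewOsFs())
+//	ufs := NewCopyOnWriteFs(roBase, NewMemMapFs())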
+type CopyOnWriteFs struct {
+ base Fs
+ layer Fs
+}
+
+func NewCopyOnWriteFs(base Fs, layer Fs) Fs {
+ return &CopyOnWriteFs{base: base, layer: layer}
+}
+
+// Returns true if the file is not in the overlay
+func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) {
+ if _, err := u.layer.Stat(name); err == nil {
+ return false, nil
+ }
+ _, err := u.base.Stat(name)
+ if err != nil {
+ if oerr, ok := err.(*os.PathError); ok {
+ if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR {
+ return false, nil
+ }
+ }
+ if err == syscall.ENOENT {
+ return false, nil
+ }
+ }
+ return true, err
+}
+
+func (u *CopyOnWriteFs) copyToLayer(name string) error {
+ return copyToLayer(u.base, u.layer, name)
+}
+
+func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error {
+ b, err := u.isBaseFile(name)
+ if err != nil {
+ return err
+ }
+ if b {
+ if err := u.copyToLayer(name); err != nil {
+ return err
+ }
+ }
+ return u.layer.Chtimes(name, atime, mtime)
+}
+
+func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error {
+ b, err := u.isBaseFile(name)
+ if err != nil {
+ return err
+ }
+ if b {
+ if err := u.copyToLayer(name); err != nil {
+ return err
+ }
+ }
+ return u.layer.Chmod(name, mode)
+}
+
+func (u *CopyOnWriteFs) Chown(name string, uid, gid int) error {
+ b, err := u.isBaseFile(name)
+ if err != nil {
+ return err
+ }
+ if b {
+ if err := u.copyToLayer(name); err != nil {
+ return err
+ }
+ }
+ return u.layer.Chown(name, uid, gid)
+}
+
+func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) {
+ fi, err := u.layer.Stat(name)
+ if err != nil {
+ isNotExist := u.isNotExist(err)
+ if isNotExist {
+ return u.base.Stat(name)
+ }
+ return nil, err
+ }
+ return fi, nil
+}
+
+func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
+ llayer, ok1 := u.layer.(Lstater)
+ lbase, ok2 := u.base.(Lstater)
+
+ if ok1 {
+ fi, b, err := llayer.LstatIfPossible(name)
+ if err == nil {
+ return fi, b, nil
+ }
+
+ if !u.isNotExist(err) {
+ return nil, b, err
+ }
+ }
+
+ if ok2 {
+ fi, b, err := lbase.LstatIfPossible(name)
+ if err == nil {
+ return fi, b, nil
+ }
+ if !u.isNotExist(err) {
+ return nil, b, err
+ }
+ }
+
+ fi, err := u.Stat(name)
+
+ return fi, false, err
+}
+
+func (u *CopyOnWriteFs) SymlinkIfPossible(oldname, newname string) error {
+ if slayer, ok := u.layer.(Linker); ok {
+ return slayer.SymlinkIfPossible(oldname, newname)
+ }
+
+ return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink}
+}
+
+func (u *CopyOnWriteFs) ReadlinkIfPossible(name string) (string, error) {
+ if rlayer, ok := u.layer.(LinkReader); ok {
+ return rlayer.ReadlinkIfPossible(name)
+ }
+
+ if rbase, ok := u.base.(LinkReader); ok {
+ return rbase.ReadlinkIfPossible(name)
+ }
+
+ return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink}
+}
+
+func (u *CopyOnWriteFs) isNotExist(err error) bool {
+ if e, ok := err.(*os.PathError); ok {
+ err = e.Err
+ }
+ if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR {
+ return true
+ }
+ return false
+}
+
+// Renaming files present only in the base layer is not permitted
+func (u *CopyOnWriteFs) Rename(oldname, newname string) error {
+ b, err := u.isBaseFile(oldname)
+ if err != nil {
+ return err
+ }
+ if b {
+ return syscall.EPERM
+ }
+ return u.layer.Rename(oldname, newname)
+}
+
+// Removing files present only in the base layer is not permitted. If
+// a file is present in the base layer and the overlay, only the overlay
+// will be removed.
+func (u *CopyOnWriteFs) Remove(name string) error {
+ err := u.layer.Remove(name)
+ switch err {
+ case syscall.ENOENT:
+ _, err = u.base.Stat(name)
+ if err == nil {
+ return syscall.EPERM
+ }
+ return syscall.ENOENT
+ default:
+ return err
+ }
+}
+
+func (u *CopyOnWriteFs) RemoveAll(name string) error {
+ err := u.layer.RemoveAll(name)
+ switch err {
+ case syscall.ENOENT:
+ _, err = u.base.Stat(name)
+ if err == nil {
+ return syscall.EPERM
+ }
+ return syscall.ENOENT
+ default:
+ return err
+ }
+}
+
+func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ b, err := u.isBaseFile(name)
+ if err != nil {
+ return nil, err
+ }
+
+ if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
+ if b {
+ if err = u.copyToLayer(name); err != nil {
+ return nil, err
+ }
+ return u.layer.OpenFile(name, flag, perm)
+ }
+
+ dir := filepath.Dir(name)
+ isaDir, err := IsDir(u.base, dir)
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ if isaDir {
+ if err = u.layer.MkdirAll(dir, 0777); err != nil {
+ return nil, err
+ }
+ return u.layer.OpenFile(name, flag, perm)
+ }
+
+ isaDir, err = IsDir(u.layer, dir)
+ if err != nil {
+ return nil, err
+ }
+ if isaDir {
+ return u.layer.OpenFile(name, flag, perm)
+ }
+
+ return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist?
+ }
+ if b {
+ return u.base.OpenFile(name, flag, perm)
+ }
+ return u.layer.OpenFile(name, flag, perm)
+}
+
+// This function handles the 9 different possibilities caused
+// by the union which are the intersection of the following...
+// layer: doesn't exist, exists as a file, and exists as a directory
+// base: doesn't exist, exists as a file, and exists as a directory
+func (u *CopyOnWriteFs) Open(name string) (File, error) {
+ // Since the overlay overrides the base we check that first
+ b, err := u.isBaseFile(name)
+ if err != nil {
+ return nil, err
+ }
+
+ // If overlay doesn't exist, return the base (base state irrelevant)
+ if b {
+ return u.base.Open(name)
+ }
+
+ // If overlay is a file, return it (base state irrelevant)
+ dir, err := IsDir(u.layer, name)
+ if err != nil {
+ return nil, err
+ }
+ if !dir {
+ return u.layer.Open(name)
+ }
+
+ // Overlay is a directory, base state now matters.
+ // Base state has 3 states to check but 2 outcomes:
+ // A. It's a file or non-readable in the base (return just the overlay)
+ // B. It's an accessible directory in the base (return a UnionFile)
+
+ // If base is file or nonreadable, return overlay
+ dir, err = IsDir(u.base, name)
+ if !dir || err != nil {
+ return u.layer.Open(name)
+ }
+
+ // Both base & layer are directories
+ // Return union file (if opens are without error)
+ bfile, bErr := u.base.Open(name)
+ lfile, lErr := u.layer.Open(name)
+
+ // If either have errors at this point something is very wrong. Return nil and the errors
+ if bErr != nil || lErr != nil {
+ return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr)
+ }
+
+ return &UnionFile{Base: bfile, Layer: lfile}, nil
+}
+
+func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error {
+ dir, err := IsDir(u.base, name)
+ if err != nil {
+ return u.layer.MkdirAll(name, perm)
+ }
+ if dir {
+ return ErrFileExists
+ }
+ return u.layer.MkdirAll(name, perm)
+}
+
+func (u *CopyOnWriteFs) Name() string {
+ return "CopyOnWriteFs"
+}
+
+func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error {
+ dir, err := IsDir(u.base, name)
+ if err != nil {
+ return u.layer.MkdirAll(name, perm)
+ }
+ if dir {
+ // This is in line with how os.MkdirAll behaves.
+ return nil
+ }
+ return u.layer.MkdirAll(name, perm)
+}
+
+func (u *CopyOnWriteFs) Create(name string) (File, error) {
+ return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666)
+}
diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go
new file mode 100644
index 000000000..2b86e30d1
--- /dev/null
+++ b/vendor/github.com/spf13/afero/httpFs.go
@@ -0,0 +1,114 @@
+// Copyright © 2014 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "errors"
+ "net/http"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+type httpDir struct {
+ basePath string
+ fs HttpFs
+}
+
+func (d httpDir) Open(name string) (http.File, error) {
+ if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
+ strings.Contains(name, "\x00") {
+ return nil, errors.New("http: invalid character in file path")
+ }
+ dir := string(d.basePath)
+ if dir == "" {
+ dir = "."
+ }
+
+ f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name))))
+ if err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+type HttpFs struct {
+ source Fs
+}
+
+func NewHttpFs(source Fs) *HttpFs {
+ return &HttpFs{source: source}
+}
+
+func (h HttpFs) Dir(s string) *httpDir {
+ return &httpDir{basePath: s, fs: h}
+}
+
+func (h HttpFs) Name() string { return "h HttpFs" }
+
+func (h HttpFs) Create(name string) (File, error) {
+ return h.source.Create(name)
+}
+
+func (h HttpFs) Chmod(name string, mode os.FileMode) error {
+ return h.source.Chmod(name, mode)
+}
+
+func (h HttpFs) Chown(name string, uid, gid int) error {
+ return h.source.Chown(name, uid, gid)
+}
+
+func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
+ return h.source.Chtimes(name, atime, mtime)
+}
+
+func (h HttpFs) Mkdir(name string, perm os.FileMode) error {
+ return h.source.Mkdir(name, perm)
+}
+
+func (h HttpFs) MkdirAll(path string, perm os.FileMode) error {
+ return h.source.MkdirAll(path, perm)
+}
+
+func (h HttpFs) Open(name string) (http.File, error) {
+ f, err := h.source.Open(name)
+ if err == nil {
+ if httpfile, ok := f.(http.File); ok {
+ return httpfile, nil
+ }
+ }
+ return nil, err
+}
+
+func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ return h.source.OpenFile(name, flag, perm)
+}
+
+func (h HttpFs) Remove(name string) error {
+ return h.source.Remove(name)
+}
+
+func (h HttpFs) RemoveAll(path string) error {
+ return h.source.RemoveAll(path)
+}
+
+func (h HttpFs) Rename(oldname, newname string) error {
+ return h.source.Rename(oldname, newname)
+}
+
+func (h HttpFs) Stat(name string) (os.FileInfo, error) {
+ return h.source.Stat(name)
+}
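
Not part of the vendored patch: a hedged sketch of the intended use of HttpFs, wrapping any afero.Fs so it can back net/http's file server; the /static path, file, and port are illustrative.

```go
package main

import (
	"log"
	"net/http"

	"github.com/spf13/afero"
)

func main() {
	// Any afero.Fs (in-memory here) can back an http.FileServer via HttpFs.
	appFs := afero.NewMemMapFs()
	afero.WriteFile(appFs, "/static/index.html", []byte("<h1>hello</h1>"), 0644)

	// The value returned by Dir implements http.FileSystem.
	httpFs := afero.NewHttpFs(appFs)
	http.Handle("/", http.FileServer(httpFs.Dir("/static")))

	log.Fatal(http.ListenAndServe(":8080", nil))
}
```
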
diff --git a/vendor/github.com/spf13/afero/iofs.go b/vendor/github.com/spf13/afero/iofs.go
new file mode 100644
index 000000000..c80345536
--- /dev/null
+++ b/vendor/github.com/spf13/afero/iofs.go
@@ -0,0 +1,288 @@
+// +build go1.16
+
+package afero
+
+import (
+ "io"
+ "io/fs"
+ "os"
+ "path"
+ "time"
+)
+
+// IOFS adapts afero.Fs to the stdlib io/fs.FS
+type IOFS struct {
+ Fs
+}
+
+func NewIOFS(fs Fs) IOFS {
+ return IOFS{Fs: fs}
+}
+
+var (
+ _ fs.FS = IOFS{}
+ _ fs.GlobFS = IOFS{}
+ _ fs.ReadDirFS = IOFS{}
+ _ fs.ReadFileFS = IOFS{}
+ _ fs.StatFS = IOFS{}
+ _ fs.SubFS = IOFS{}
+)
+
+func (iofs IOFS) Open(name string) (fs.File, error) {
+ const op = "open"
+
+ // by convention for fs.FS implementations we should perform this check
+ if !fs.ValidPath(name) {
+ return nil, iofs.wrapError(op, name, fs.ErrInvalid)
+ }
+
+ file, err := iofs.Fs.Open(name)
+ if err != nil {
+ return nil, iofs.wrapError(op, name, err)
+ }
+
+ // file should implement fs.ReadDirFile
+ if _, ok := file.(fs.ReadDirFile); !ok {
+ file = readDirFile{file}
+ }
+
+ return file, nil
+}
+
+func (iofs IOFS) Glob(pattern string) ([]string, error) {
+ const op = "glob"
+
+ // afero.Glob does not perform this check but it's required for implementations
+ if _, err := path.Match(pattern, ""); err != nil {
+ return nil, iofs.wrapError(op, pattern, err)
+ }
+
+ items, err := Glob(iofs.Fs, pattern)
+ if err != nil {
+ return nil, iofs.wrapError(op, pattern, err)
+ }
+
+ return items, nil
+}
+
+func (iofs IOFS) ReadDir(name string) ([]fs.DirEntry, error) {
+ items, err := ReadDir(iofs.Fs, name)
+ if err != nil {
+ return nil, iofs.wrapError("readdir", name, err)
+ }
+
+ ret := make([]fs.DirEntry, len(items))
+ for i := range items {
+ ret[i] = dirEntry{items[i]}
+ }
+
+ return ret, nil
+}
+
+func (iofs IOFS) ReadFile(name string) ([]byte, error) {
+ const op = "readfile"
+
+ if !fs.ValidPath(name) {
+ return nil, iofs.wrapError(op, name, fs.ErrInvalid)
+ }
+
+ bytes, err := ReadFile(iofs.Fs, name)
+ if err != nil {
+ return nil, iofs.wrapError(op, name, err)
+ }
+
+ return bytes, nil
+}
+
+func (iofs IOFS) Sub(dir string) (fs.FS, error) { return IOFS{NewBasePathFs(iofs.Fs, dir)}, nil }
+
+func (IOFS) wrapError(op, path string, err error) error {
+ if _, ok := err.(*fs.PathError); ok {
+ return err // don't need to wrap again
+ }
+
+ return &fs.PathError{
+ Op: op,
+ Path: path,
+ Err: err,
+ }
+}
+
+// dirEntry provides an adapter from os.FileInfo to fs.DirEntry
+type dirEntry struct {
+ fs.FileInfo
+}
+
+var _ fs.DirEntry = dirEntry{}
+
+func (d dirEntry) Type() fs.FileMode { return d.FileInfo.Mode().Type() }
+
+func (d dirEntry) Info() (fs.FileInfo, error) { return d.FileInfo, nil }
+
+// readDirFile provides an adapter from afero.File to fs.ReadDirFile, needed for a correct Open
+type readDirFile struct {
+ File
+}
+
+var _ fs.ReadDirFile = readDirFile{}
+
+func (r readDirFile) ReadDir(n int) ([]fs.DirEntry, error) {
+ items, err := r.File.Readdir(n)
+ if err != nil {
+ return nil, err
+ }
+
+ ret := make([]fs.DirEntry, len(items))
+ for i := range items {
+ ret[i] = dirEntry{items[i]}
+ }
+
+ return ret, nil
+}
+
+// FromIOFS adapts io/fs.FS so it can be used as an afero.Fs.
+// Note that io/fs.FS is read-only, so all mutating methods will return an fs.PathError wrapping fs.ErrPermission.
+// To store modifications, you may use afero.CopyOnWriteFs.
+type FromIOFS struct {
+ fs.FS
+}
+
+var _ Fs = FromIOFS{}
+
+func (f FromIOFS) Create(name string) (File, error) { return nil, notImplemented("create", name) }
+
+func (f FromIOFS) Mkdir(name string, perm os.FileMode) error { return notImplemented("mkdir", name) }
+
+func (f FromIOFS) MkdirAll(path string, perm os.FileMode) error {
+ return notImplemented("mkdirall", path)
+}
+
+func (f FromIOFS) Open(name string) (File, error) {
+ file, err := f.FS.Open(name)
+ if err != nil {
+ return nil, err
+ }
+
+ return fromIOFSFile{File: file, name: name}, nil
+}
+
+func (f FromIOFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ return f.Open(name)
+}
+
+func (f FromIOFS) Remove(name string) error {
+ return notImplemented("remove", name)
+}
+
+func (f FromIOFS) RemoveAll(path string) error {
+ return notImplemented("removeall", path)
+}
+
+func (f FromIOFS) Rename(oldname, newname string) error {
+ return notImplemented("rename", oldname)
+}
+
+func (f FromIOFS) Stat(name string) (os.FileInfo, error) { return fs.Stat(f.FS, name) }
+
+func (f FromIOFS) Name() string { return "fromiofs" }
+
+func (f FromIOFS) Chmod(name string, mode os.FileMode) error {
+ return notImplemented("chmod", name)
+}
+
+func (f FromIOFS) Chown(name string, uid, gid int) error {
+ return notImplemented("chown", name)
+}
+
+func (f FromIOFS) Chtimes(name string, atime time.Time, mtime time.Time) error {
+ return notImplemented("chtimes", name)
+}
+
+type fromIOFSFile struct {
+ fs.File
+ name string
+}
+
+func (f fromIOFSFile) ReadAt(p []byte, off int64) (n int, err error) {
+ readerAt, ok := f.File.(io.ReaderAt)
+ if !ok {
+ return -1, notImplemented("readat", f.name)
+ }
+
+ return readerAt.ReadAt(p, off)
+}
+
+func (f fromIOFSFile) Seek(offset int64, whence int) (int64, error) {
+ seeker, ok := f.File.(io.Seeker)
+ if !ok {
+ return -1, notImplemented("seek", f.name)
+ }
+
+ return seeker.Seek(offset, whence)
+}
+
+func (f fromIOFSFile) Write(p []byte) (n int, err error) {
+ return -1, notImplemented("write", f.name)
+}
+
+func (f fromIOFSFile) WriteAt(p []byte, off int64) (n int, err error) {
+ return -1, notImplemented("writeat", f.name)
+}
+
+func (f fromIOFSFile) Name() string { return f.name }
+
+func (f fromIOFSFile) Readdir(count int) ([]os.FileInfo, error) {
+ rdfile, ok := f.File.(fs.ReadDirFile)
+ if !ok {
+ return nil, notImplemented("readdir", f.name)
+ }
+
+ entries, err := rdfile.ReadDir(count)
+ if err != nil {
+ return nil, err
+ }
+
+ ret := make([]os.FileInfo, len(entries))
+ for i := range entries {
+ ret[i], err = entries[i].Info()
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return ret, nil
+}
+
+func (f fromIOFSFile) Readdirnames(n int) ([]string, error) {
+ rdfile, ok := f.File.(fs.ReadDirFile)
+ if !ok {
+ return nil, notImplemented("readdir", f.name)
+ }
+
+ entries, err := rdfile.ReadDir(n)
+ if err != nil {
+ return nil, err
+ }
+
+ ret := make([]string, len(entries))
+ for i := range entries {
+ ret[i] = entries[i].Name()
+ }
+
+ return ret, nil
+}
+
+func (f fromIOFSFile) Sync() error { return nil }
+
+func (f fromIOFSFile) Truncate(size int64) error {
+ return notImplemented("truncate", f.name)
+}
+
+func (f fromIOFSFile) WriteString(s string) (ret int, err error) {
+ return -1, notImplemented("writestring", f.name)
+}
+
+func notImplemented(op, path string) error {
+ return &fs.PathError{Op: op, Path: path, Err: fs.ErrPermission}
+}
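
Illustrative only, not part of the patch: a sketch of the two adapters in this file, going from afero.Fs to io/fs.FS (IOFS) and back again (FromIOFS); the file name is invented.

```go
package main

import (
	"fmt"
	"io/fs"

	"github.com/spf13/afero"
)

func main() {
	memFs := afero.NewMemMapFs()
	afero.WriteFile(memFs, "docs/readme.txt", []byte("hi"), 0644)

	// Wrap an afero.Fs so it can be passed to APIs that expect io/fs.FS.
	var stdFs fs.FS = afero.NewIOFS(memFs)
	data, err := fs.ReadFile(stdFs, "docs/readme.txt")
	fmt.Println(string(data), err)

	// And the other direction: treat a read-only io/fs.FS as an afero.Fs.
	var back afero.Fs = afero.FromIOFS{FS: stdFs}
	info, _ := back.Stat("docs/readme.txt")
	fmt.Println(info.Name(), info.Size())
}
```
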
diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go
new file mode 100644
index 000000000..a403133e2
--- /dev/null
+++ b/vendor/github.com/spf13/afero/ioutil.go
@@ -0,0 +1,240 @@
+// Copyright ©2015 The Go Authors
+// Copyright ©2015 Steve Francia
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+// byName implements sort.Interface.
+type byName []os.FileInfo
+
+func (f byName) Len() int { return len(f) }
+func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() }
+func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
+
+// ReadDir reads the directory named by dirname and returns
+// a list of sorted directory entries.
+func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) {
+ return ReadDir(a.Fs, dirname)
+}
+
+func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) {
+ f, err := fs.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ list, err := f.Readdir(-1)
+ f.Close()
+ if err != nil {
+ return nil, err
+ }
+ sort.Sort(byName(list))
+ return list, nil
+}
+
+// ReadFile reads the file named by filename and returns the contents.
+// A successful call returns err == nil, not err == EOF. Because ReadFile
+// reads the whole file, it does not treat an EOF from Read as an error
+// to be reported.
+func (a Afero) ReadFile(filename string) ([]byte, error) {
+ return ReadFile(a.Fs, filename)
+}
+
+func ReadFile(fs Fs, filename string) ([]byte, error) {
+ f, err := fs.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ // It's a good but not certain bet that FileInfo will tell us exactly how much to
+ // read, so let's try it but be prepared for the answer to be wrong.
+ var n int64
+
+ if fi, err := f.Stat(); err == nil {
+ // Don't preallocate a huge buffer, just in case.
+ if size := fi.Size(); size < 1e9 {
+ n = size
+ }
+ }
+ // As initial capacity for readAll, use n + a little extra in case Size is zero,
+ // and to avoid another allocation after Read has filled the buffer. The readAll
+ // call will read into its allocated internal buffer cheaply. If the size was
+ // wrong, we'll either waste some space off the end or reallocate as needed, but
+ // in the overwhelmingly common case we'll get it just right.
+ return readAll(f, n+bytes.MinRead)
+}
+
+// readAll reads from r until an error or EOF and returns the data it read
+// from the internal buffer allocated with a specified capacity.
+func readAll(r io.Reader, capacity int64) (b []byte, err error) {
+ buf := bytes.NewBuffer(make([]byte, 0, capacity))
+ // If the buffer overflows, we will get bytes.ErrTooLarge.
+ // Return that as an error. Any other panic remains.
+ defer func() {
+ e := recover()
+ if e == nil {
+ return
+ }
+ if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge {
+ err = panicErr
+ } else {
+ panic(e)
+ }
+ }()
+ _, err = buf.ReadFrom(r)
+ return buf.Bytes(), err
+}
+
+// ReadAll reads from r until an error or EOF and returns the data it read.
+// A successful call returns err == nil, not err == EOF. Because ReadAll is
+// defined to read from src until EOF, it does not treat an EOF from Read
+// as an error to be reported.
+func ReadAll(r io.Reader) ([]byte, error) {
+ return readAll(r, bytes.MinRead)
+}
+
+// WriteFile writes data to a file named by filename.
+// If the file does not exist, WriteFile creates it with permissions perm;
+// otherwise WriteFile truncates it before writing.
+func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error {
+ return WriteFile(a.Fs, filename, data, perm)
+}
+
+func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error {
+ f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+ if err != nil {
+ return err
+ }
+ n, err := f.Write(data)
+ if err == nil && n < len(data) {
+ err = io.ErrShortWrite
+ }
+ if err1 := f.Close(); err == nil {
+ err = err1
+ }
+ return err
+}
+
+// Random number state.
+// We generate random temporary file names so that there's a good
+// chance the file doesn't exist yet - keeps the number of tries in
+// TempFile to a minimum.
+var rand uint32
+var randmu sync.Mutex
+
+func reseed() uint32 {
+ return uint32(time.Now().UnixNano() + int64(os.Getpid()))
+}
+
+func nextRandom() string {
+ randmu.Lock()
+ r := rand
+ if r == 0 {
+ r = reseed()
+ }
+ r = r*1664525 + 1013904223 // constants from Numerical Recipes
+ rand = r
+ randmu.Unlock()
+ return strconv.Itoa(int(1e9 + r%1e9))[1:]
+}
+
+// TempFile creates a new temporary file in the directory dir,
+// opens the file for reading and writing, and returns the resulting *os.File.
+// The filename is generated by taking pattern and adding a random
+// string to the end. If pattern includes a "*", the random string
+// replaces the last "*".
+// If dir is the empty string, TempFile uses the default directory
+// for temporary files (see os.TempDir).
+// Multiple programs calling TempFile simultaneously
+// will not choose the same file. The caller can use f.Name()
+// to find the pathname of the file. It is the caller's responsibility
+// to remove the file when no longer needed.
+func (a Afero) TempFile(dir, pattern string) (f File, err error) {
+ return TempFile(a.Fs, dir, pattern)
+}
+
+func TempFile(fs Fs, dir, pattern string) (f File, err error) {
+ if dir == "" {
+ dir = os.TempDir()
+ }
+
+ var prefix, suffix string
+ if pos := strings.LastIndex(pattern, "*"); pos != -1 {
+ prefix, suffix = pattern[:pos], pattern[pos+1:]
+ } else {
+ prefix = pattern
+ }
+
+ nconflict := 0
+ for i := 0; i < 10000; i++ {
+ name := filepath.Join(dir, prefix+nextRandom()+suffix)
+ f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
+ if os.IsExist(err) {
+ if nconflict++; nconflict > 10 {
+ randmu.Lock()
+ rand = reseed()
+ randmu.Unlock()
+ }
+ continue
+ }
+ break
+ }
+ return
+}
+
+// TempDir creates a new temporary directory in the directory dir
+// with a name beginning with prefix and returns the path of the
+// new directory. If dir is the empty string, TempDir uses the
+// default directory for temporary files (see os.TempDir).
+// Multiple programs calling TempDir simultaneously
+// will not choose the same directory. It is the caller's responsibility
+// to remove the directory when no longer needed.
+func (a Afero) TempDir(dir, prefix string) (name string, err error) {
+ return TempDir(a.Fs, dir, prefix)
+}
+func TempDir(fs Fs, dir, prefix string) (name string, err error) {
+ if dir == "" {
+ dir = os.TempDir()
+ }
+
+ nconflict := 0
+ for i := 0; i < 10000; i++ {
+ try := filepath.Join(dir, prefix+nextRandom())
+ err = fs.Mkdir(try, 0700)
+ if os.IsExist(err) {
+ if nconflict++; nconflict > 10 {
+ randmu.Lock()
+ rand = reseed()
+ randmu.Unlock()
+ }
+ continue
+ }
+ if err == nil {
+ name = try
+ }
+ break
+ }
+ return
+}
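
Illustrative sketch (not part of the patch) of the ioutil-style helpers above run against an in-memory Fs; names and contents are placeholders.

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()

	// WriteFile / ReadFile mirror the old io/ioutil helpers, but work against any Fs.
	if err := afero.WriteFile(fs, "/notes.txt", []byte("remember the milk"), 0644); err != nil {
		panic(err)
	}
	data, _ := afero.ReadFile(fs, "/notes.txt")
	fmt.Println(string(data))

	// TempFile and TempDir generate collision-resistant names inside the Fs.
	tmp, _ := afero.TempFile(fs, "", "upload-*.bin")
	fmt.Println("temp file:", tmp.Name())
	tmp.Close()

	dir, _ := afero.TempDir(fs, "", "session-")
	fmt.Println("temp dir:", dir)
}
```
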
diff --git a/vendor/github.com/spf13/afero/lstater.go b/vendor/github.com/spf13/afero/lstater.go
new file mode 100644
index 000000000..89c1bfc0a
--- /dev/null
+++ b/vendor/github.com/spf13/afero/lstater.go
@@ -0,0 +1,27 @@
+// Copyright © 2018 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "os"
+)
+
+// Lstater is an optional interface in Afero. It is only implemented by the
+// filesystems saying so.
+// It will call Lstat if the filesystem itself is, or it delegates to, the os filesystem.
+// Else it will call Stat.
+// In addition to the FileInfo, it will return a boolean telling whether Lstat was called or not.
+type Lstater interface {
+ LstatIfPossible(name string) (os.FileInfo, bool, error)
+}
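
A small sketch, not part of the patch, of how callers can probe for the optional Lstater interface; it mirrors the lstatIfPossible helper used elsewhere in this package.

```go
package fsutil

import (
	"os"

	"github.com/spf13/afero"
)

// lstatOrStat prefers Lstat when the wrapped filesystem supports it, and
// reports whether a real Lstat call was made.
func lstatOrStat(fs afero.Fs, name string) (os.FileInfo, bool, error) {
	if lfs, ok := fs.(afero.Lstater); ok {
		return lfs.LstatIfPossible(name)
	}
	fi, err := fs.Stat(name)
	return fi, false, err
}
```
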
diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go
new file mode 100644
index 000000000..7db4b7de6
--- /dev/null
+++ b/vendor/github.com/spf13/afero/match.go
@@ -0,0 +1,110 @@
+// Copyright © 2014 Steve Francia .
+// Copyright 2009 The Go Authors. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "path/filepath"
+ "sort"
+ "strings"
+)
+
+// Glob returns the names of all files matching pattern or nil
+// if there is no matching file. The syntax of patterns is the same
+// as in Match. The pattern may describe hierarchical names such as
+// /usr/*/bin/ed (assuming the Separator is '/').
+//
+// Glob ignores file system errors such as I/O errors reading directories.
+// The only possible returned error is ErrBadPattern, when pattern
+// is malformed.
+//
+// This was adapted from (http://golang.org/pkg/path/filepath) and uses several
+// built-ins from that package.
+func Glob(fs Fs, pattern string) (matches []string, err error) {
+ if !hasMeta(pattern) {
+		// Lstat not supported by all filesystems.
+ if _, err = lstatIfPossible(fs, pattern); err != nil {
+ return nil, nil
+ }
+ return []string{pattern}, nil
+ }
+
+ dir, file := filepath.Split(pattern)
+ switch dir {
+ case "":
+ dir = "."
+ case string(filepath.Separator):
+ // nothing
+ default:
+ dir = dir[0 : len(dir)-1] // chop off trailing separator
+ }
+
+ if !hasMeta(dir) {
+ return glob(fs, dir, file, nil)
+ }
+
+ var m []string
+ m, err = Glob(fs, dir)
+ if err != nil {
+ return
+ }
+ for _, d := range m {
+ matches, err = glob(fs, d, file, matches)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// glob searches for files matching pattern in the directory dir
+// and appends them to matches. If the directory cannot be
+// opened, it returns the existing matches. New matches are
+// added in lexicographical order.
+func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) {
+ m = matches
+ fi, err := fs.Stat(dir)
+ if err != nil {
+ return
+ }
+ if !fi.IsDir() {
+ return
+ }
+ d, err := fs.Open(dir)
+ if err != nil {
+ return
+ }
+ defer d.Close()
+
+ names, _ := d.Readdirnames(-1)
+ sort.Strings(names)
+
+ for _, n := range names {
+ matched, err := filepath.Match(pattern, n)
+ if err != nil {
+ return m, err
+ }
+ if matched {
+ m = append(m, filepath.Join(dir, n))
+ }
+ }
+ return
+}
+
+// hasMeta reports whether path contains any of the magic characters
+// recognized by Match.
+func hasMeta(path string) bool {
+ // TODO(niemeyer): Should other magic characters be added here?
+ return strings.ContainsAny(path, "*?[")
+}
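
Sketch only (not part of the patch): Glob evaluated against an in-memory Fs; the paths are invented.

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	for _, name := range []string{"/logs/app.log", "/logs/db.log", "/logs/readme.txt"} {
		afero.WriteFile(fs, name, []byte("x"), 0644)
	}

	// Same pattern syntax as path/filepath.Glob, but evaluated against the Fs.
	matches, err := afero.Glob(fs, "/logs/*.log")
	fmt.Println(matches, err) // [/logs/app.log /logs/db.log] <nil>
}
```
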
diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go
new file mode 100644
index 000000000..e104013f4
--- /dev/null
+++ b/vendor/github.com/spf13/afero/mem/dir.go
@@ -0,0 +1,37 @@
+// Copyright © 2014 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mem
+
+type Dir interface {
+ Len() int
+ Names() []string
+ Files() []*FileData
+ Add(*FileData)
+ Remove(*FileData)
+}
+
+func RemoveFromMemDir(dir *FileData, f *FileData) {
+ dir.memDir.Remove(f)
+}
+
+func AddToMemDir(dir *FileData, f *FileData) {
+ dir.memDir.Add(f)
+}
+
+func InitializeDir(d *FileData) {
+ if d.memDir == nil {
+ d.dir = true
+ d.memDir = &DirMap{}
+ }
+}
diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go
new file mode 100644
index 000000000..03a57ee5b
--- /dev/null
+++ b/vendor/github.com/spf13/afero/mem/dirmap.go
@@ -0,0 +1,43 @@
+// Copyright © 2015 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mem
+
+import "sort"
+
+type DirMap map[string]*FileData
+
+func (m DirMap) Len() int { return len(m) }
+func (m DirMap) Add(f *FileData) { m[f.name] = f }
+func (m DirMap) Remove(f *FileData) { delete(m, f.name) }
+func (m DirMap) Files() (files []*FileData) {
+ for _, f := range m {
+ files = append(files, f)
+ }
+ sort.Sort(filesSorter(files))
+ return files
+}
+
+// implement sort.Interface for []*FileData
+type filesSorter []*FileData
+
+func (s filesSorter) Len() int { return len(s) }
+func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name }
+
+func (m DirMap) Names() (names []string) {
+ for x := range m {
+ names = append(names, x)
+ }
+ return names
+}
diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go
new file mode 100644
index 000000000..5a20730c2
--- /dev/null
+++ b/vendor/github.com/spf13/afero/mem/file.go
@@ -0,0 +1,338 @@
+// Copyright © 2015 Steve Francia .
+// Copyright 2013 tsuru authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mem
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+const FilePathSeparator = string(filepath.Separator)
+
+type File struct {
+ // atomic requires 64-bit alignment for struct field access
+ at int64
+ readDirCount int64
+ closed bool
+ readOnly bool
+ fileData *FileData
+}
+
+func NewFileHandle(data *FileData) *File {
+ return &File{fileData: data}
+}
+
+func NewReadOnlyFileHandle(data *FileData) *File {
+ return &File{fileData: data, readOnly: true}
+}
+
+func (f File) Data() *FileData {
+ return f.fileData
+}
+
+type FileData struct {
+ sync.Mutex
+ name string
+ data []byte
+ memDir Dir
+ dir bool
+ mode os.FileMode
+ modtime time.Time
+ uid int
+ gid int
+}
+
+func (d *FileData) Name() string {
+ d.Lock()
+ defer d.Unlock()
+ return d.name
+}
+
+func CreateFile(name string) *FileData {
+ return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()}
+}
+
+func CreateDir(name string) *FileData {
+ return &FileData{name: name, memDir: &DirMap{}, dir: true}
+}
+
+func ChangeFileName(f *FileData, newname string) {
+ f.Lock()
+ f.name = newname
+ f.Unlock()
+}
+
+func SetMode(f *FileData, mode os.FileMode) {
+ f.Lock()
+ f.mode = mode
+ f.Unlock()
+}
+
+func SetModTime(f *FileData, mtime time.Time) {
+ f.Lock()
+ setModTime(f, mtime)
+ f.Unlock()
+}
+
+func setModTime(f *FileData, mtime time.Time) {
+ f.modtime = mtime
+}
+
+func SetUID(f *FileData, uid int) {
+ f.Lock()
+ f.uid = uid
+ f.Unlock()
+}
+
+func SetGID(f *FileData, gid int) {
+ f.Lock()
+ f.gid = gid
+ f.Unlock()
+}
+
+func GetFileInfo(f *FileData) *FileInfo {
+ return &FileInfo{f}
+}
+
+func (f *File) Open() error {
+ atomic.StoreInt64(&f.at, 0)
+ atomic.StoreInt64(&f.readDirCount, 0)
+ f.fileData.Lock()
+ f.closed = false
+ f.fileData.Unlock()
+ return nil
+}
+
+func (f *File) Close() error {
+ f.fileData.Lock()
+ f.closed = true
+ if !f.readOnly {
+ setModTime(f.fileData, time.Now())
+ }
+ f.fileData.Unlock()
+ return nil
+}
+
+func (f *File) Name() string {
+ return f.fileData.Name()
+}
+
+func (f *File) Stat() (os.FileInfo, error) {
+ return &FileInfo{f.fileData}, nil
+}
+
+func (f *File) Sync() error {
+ return nil
+}
+
+func (f *File) Readdir(count int) (res []os.FileInfo, err error) {
+ if !f.fileData.dir {
+ return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")}
+ }
+ var outLength int64
+
+ f.fileData.Lock()
+ files := f.fileData.memDir.Files()[f.readDirCount:]
+ if count > 0 {
+ if len(files) < count {
+ outLength = int64(len(files))
+ } else {
+ outLength = int64(count)
+ }
+ if len(files) == 0 {
+ err = io.EOF
+ }
+ } else {
+ outLength = int64(len(files))
+ }
+ f.readDirCount += outLength
+ f.fileData.Unlock()
+
+ res = make([]os.FileInfo, outLength)
+ for i := range res {
+ res[i] = &FileInfo{files[i]}
+ }
+
+ return res, err
+}
+
+func (f *File) Readdirnames(n int) (names []string, err error) {
+ fi, err := f.Readdir(n)
+ names = make([]string, len(fi))
+ for i, f := range fi {
+ _, names[i] = filepath.Split(f.Name())
+ }
+ return names, err
+}
+
+func (f *File) Read(b []byte) (n int, err error) {
+ f.fileData.Lock()
+ defer f.fileData.Unlock()
+ if f.closed == true {
+ return 0, ErrFileClosed
+ }
+ if len(b) > 0 && int(f.at) == len(f.fileData.data) {
+ return 0, io.EOF
+ }
+ if int(f.at) > len(f.fileData.data) {
+ return 0, io.ErrUnexpectedEOF
+ }
+ if len(f.fileData.data)-int(f.at) >= len(b) {
+ n = len(b)
+ } else {
+ n = len(f.fileData.data) - int(f.at)
+ }
+ copy(b, f.fileData.data[f.at:f.at+int64(n)])
+ atomic.AddInt64(&f.at, int64(n))
+ return
+}
+
+func (f *File) ReadAt(b []byte, off int64) (n int, err error) {
+ prev := atomic.LoadInt64(&f.at)
+ atomic.StoreInt64(&f.at, off)
+ n, err = f.Read(b)
+ atomic.StoreInt64(&f.at, prev)
+ return
+}
+
+func (f *File) Truncate(size int64) error {
+ if f.closed == true {
+ return ErrFileClosed
+ }
+ if f.readOnly {
+ return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")}
+ }
+ if size < 0 {
+ return ErrOutOfRange
+ }
+ f.fileData.Lock()
+ defer f.fileData.Unlock()
+ if size > int64(len(f.fileData.data)) {
+ diff := size - int64(len(f.fileData.data))
+ f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...)
+ } else {
+ f.fileData.data = f.fileData.data[0:size]
+ }
+ setModTime(f.fileData, time.Now())
+ return nil
+}
+
+func (f *File) Seek(offset int64, whence int) (int64, error) {
+ if f.closed == true {
+ return 0, ErrFileClosed
+ }
+ switch whence {
+ case io.SeekStart:
+ atomic.StoreInt64(&f.at, offset)
+ case io.SeekCurrent:
+ atomic.AddInt64(&f.at, offset)
+ case io.SeekEnd:
+ atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset)
+ }
+ return f.at, nil
+}
+
+func (f *File) Write(b []byte) (n int, err error) {
+ if f.closed == true {
+ return 0, ErrFileClosed
+ }
+ if f.readOnly {
+ return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")}
+ }
+ n = len(b)
+ cur := atomic.LoadInt64(&f.at)
+ f.fileData.Lock()
+ defer f.fileData.Unlock()
+ diff := cur - int64(len(f.fileData.data))
+ var tail []byte
+ if n+int(cur) < len(f.fileData.data) {
+ tail = f.fileData.data[n+int(cur):]
+ }
+ if diff > 0 {
+ f.fileData.data = append(f.fileData.data, append(bytes.Repeat([]byte{00}, int(diff)), b...)...)
+ f.fileData.data = append(f.fileData.data, tail...)
+ } else {
+ f.fileData.data = append(f.fileData.data[:cur], b...)
+ f.fileData.data = append(f.fileData.data, tail...)
+ }
+ setModTime(f.fileData, time.Now())
+
+ atomic.AddInt64(&f.at, int64(n))
+ return
+}
+
+func (f *File) WriteAt(b []byte, off int64) (n int, err error) {
+ atomic.StoreInt64(&f.at, off)
+ return f.Write(b)
+}
+
+func (f *File) WriteString(s string) (ret int, err error) {
+ return f.Write([]byte(s))
+}
+
+func (f *File) Info() *FileInfo {
+ return &FileInfo{f.fileData}
+}
+
+type FileInfo struct {
+ *FileData
+}
+
+// Implements os.FileInfo
+func (s *FileInfo) Name() string {
+ s.Lock()
+ _, name := filepath.Split(s.name)
+ s.Unlock()
+ return name
+}
+func (s *FileInfo) Mode() os.FileMode {
+ s.Lock()
+ defer s.Unlock()
+ return s.mode
+}
+func (s *FileInfo) ModTime() time.Time {
+ s.Lock()
+ defer s.Unlock()
+ return s.modtime
+}
+func (s *FileInfo) IsDir() bool {
+ s.Lock()
+ defer s.Unlock()
+ return s.dir
+}
+func (s *FileInfo) Sys() interface{} { return nil }
+func (s *FileInfo) Size() int64 {
+ if s.IsDir() {
+ return int64(42)
+ }
+ s.Lock()
+ defer s.Unlock()
+ return int64(len(s.data))
+}
+
+var (
+ ErrFileClosed = errors.New("File is closed")
+ ErrOutOfRange = errors.New("Out of range")
+ ErrTooLarge = errors.New("Too large")
+ ErrFileNotFound = os.ErrNotExist
+ ErrFileExists = os.ErrExist
+ ErrDestinationExists = os.ErrExist
+)
diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go
new file mode 100644
index 000000000..5c265f92b
--- /dev/null
+++ b/vendor/github.com/spf13/afero/memmap.go
@@ -0,0 +1,404 @@
+// Copyright © 2014 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/spf13/afero/mem"
+)
+
+const chmodBits = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky // Only a subset of bits are allowed to be changed. Documented under os.Chmod()
+
+type MemMapFs struct {
+ mu sync.RWMutex
+ data map[string]*mem.FileData
+ init sync.Once
+}
+
+func NewMemMapFs() Fs {
+ return &MemMapFs{}
+}
+
+func (m *MemMapFs) getData() map[string]*mem.FileData {
+ m.init.Do(func() {
+ m.data = make(map[string]*mem.FileData)
+ // Root should always exist, right?
+ // TODO: what about windows?
+ root := mem.CreateDir(FilePathSeparator)
+ mem.SetMode(root, os.ModeDir|0755)
+ m.data[FilePathSeparator] = root
+ })
+ return m.data
+}
+
+func (*MemMapFs) Name() string { return "MemMapFS" }
+
+func (m *MemMapFs) Create(name string) (File, error) {
+ name = normalizePath(name)
+ m.mu.Lock()
+ file := mem.CreateFile(name)
+ m.getData()[name] = file
+ m.registerWithParent(file, 0)
+ m.mu.Unlock()
+ return mem.NewFileHandle(file), nil
+}
+
+func (m *MemMapFs) unRegisterWithParent(fileName string) error {
+ f, err := m.lockfreeOpen(fileName)
+ if err != nil {
+ return err
+ }
+ parent := m.findParent(f)
+ if parent == nil {
+ log.Panic("parent of ", f.Name(), " is nil")
+ }
+
+ parent.Lock()
+ mem.RemoveFromMemDir(parent, f)
+ parent.Unlock()
+ return nil
+}
+
+func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData {
+ pdir, _ := filepath.Split(f.Name())
+ pdir = filepath.Clean(pdir)
+ pfile, err := m.lockfreeOpen(pdir)
+ if err != nil {
+ return nil
+ }
+ return pfile
+}
+
+func (m *MemMapFs) registerWithParent(f *mem.FileData, perm os.FileMode) {
+ if f == nil {
+ return
+ }
+ parent := m.findParent(f)
+ if parent == nil {
+ pdir := filepath.Dir(filepath.Clean(f.Name()))
+ err := m.lockfreeMkdir(pdir, perm)
+ if err != nil {
+ //log.Println("Mkdir error:", err)
+ return
+ }
+ parent, err = m.lockfreeOpen(pdir)
+ if err != nil {
+ //log.Println("Open after Mkdir error:", err)
+ return
+ }
+ }
+
+ parent.Lock()
+ mem.InitializeDir(parent)
+ mem.AddToMemDir(parent, f)
+ parent.Unlock()
+}
+
+func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error {
+ name = normalizePath(name)
+ x, ok := m.getData()[name]
+ if ok {
+ // Only return ErrFileExists if it's a file, not a directory.
+ i := mem.FileInfo{FileData: x}
+ if !i.IsDir() {
+ return ErrFileExists
+ }
+ } else {
+ item := mem.CreateDir(name)
+ mem.SetMode(item, os.ModeDir|perm)
+ m.getData()[name] = item
+ m.registerWithParent(item, perm)
+ }
+ return nil
+}
+
+func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error {
+ perm &= chmodBits
+ name = normalizePath(name)
+
+ m.mu.RLock()
+ _, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if ok {
+ return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists}
+ }
+
+ m.mu.Lock()
+ item := mem.CreateDir(name)
+ mem.SetMode(item, os.ModeDir|perm)
+ m.getData()[name] = item
+ m.registerWithParent(item, perm)
+ m.mu.Unlock()
+
+ return m.setFileMode(name, perm|os.ModeDir)
+}
+
+func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error {
+ err := m.Mkdir(path, perm)
+ if err != nil {
+ if err.(*os.PathError).Err == ErrFileExists {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
+
+// Handle some relative paths
+func normalizePath(path string) string {
+ path = filepath.Clean(path)
+
+ switch path {
+ case ".":
+ return FilePathSeparator
+ case "..":
+ return FilePathSeparator
+ default:
+ return path
+ }
+}
+
+func (m *MemMapFs) Open(name string) (File, error) {
+ f, err := m.open(name)
+ if f != nil {
+ return mem.NewReadOnlyFileHandle(f), err
+ }
+ return nil, err
+}
+
+func (m *MemMapFs) openWrite(name string) (File, error) {
+ f, err := m.open(name)
+ if f != nil {
+ return mem.NewFileHandle(f), err
+ }
+ return nil, err
+}
+
+func (m *MemMapFs) open(name string) (*mem.FileData, error) {
+ name = normalizePath(name)
+
+ m.mu.RLock()
+ f, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if !ok {
+ return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound}
+ }
+ return f, nil
+}
+
+func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) {
+ name = normalizePath(name)
+ f, ok := m.getData()[name]
+ if ok {
+ return f, nil
+ } else {
+ return nil, ErrFileNotFound
+ }
+}
+
+func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ perm &= chmodBits
+ chmod := false
+ file, err := m.openWrite(name)
+ if err == nil && (flag&os.O_EXCL > 0) {
+ return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileExists}
+ }
+ if os.IsNotExist(err) && (flag&os.O_CREATE > 0) {
+ file, err = m.Create(name)
+ chmod = true
+ }
+ if err != nil {
+ return nil, err
+ }
+ if flag == os.O_RDONLY {
+ file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data())
+ }
+ if flag&os.O_APPEND > 0 {
+ _, err = file.Seek(0, os.SEEK_END)
+ if err != nil {
+ file.Close()
+ return nil, err
+ }
+ }
+ if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 {
+ err = file.Truncate(0)
+ if err != nil {
+ file.Close()
+ return nil, err
+ }
+ }
+ if chmod {
+ return file, m.setFileMode(name, perm)
+ }
+ return file, nil
+}
+
+func (m *MemMapFs) Remove(name string) error {
+ name = normalizePath(name)
+
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ if _, ok := m.getData()[name]; ok {
+ err := m.unRegisterWithParent(name)
+ if err != nil {
+ return &os.PathError{Op: "remove", Path: name, Err: err}
+ }
+ delete(m.getData(), name)
+ } else {
+ return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist}
+ }
+ return nil
+}
+
+func (m *MemMapFs) RemoveAll(path string) error {
+ path = normalizePath(path)
+ m.mu.Lock()
+ m.unRegisterWithParent(path)
+ m.mu.Unlock()
+
+ m.mu.RLock()
+ defer m.mu.RUnlock()
+
+ for p := range m.getData() {
+ if strings.HasPrefix(p, path) {
+ m.mu.RUnlock()
+ m.mu.Lock()
+ delete(m.getData(), p)
+ m.mu.Unlock()
+ m.mu.RLock()
+ }
+ }
+ return nil
+}
+
+func (m *MemMapFs) Rename(oldname, newname string) error {
+ oldname = normalizePath(oldname)
+ newname = normalizePath(newname)
+
+ if oldname == newname {
+ return nil
+ }
+
+ m.mu.RLock()
+ defer m.mu.RUnlock()
+ if _, ok := m.getData()[oldname]; ok {
+ m.mu.RUnlock()
+ m.mu.Lock()
+ m.unRegisterWithParent(oldname)
+ fileData := m.getData()[oldname]
+ delete(m.getData(), oldname)
+ mem.ChangeFileName(fileData, newname)
+ m.getData()[newname] = fileData
+ m.registerWithParent(fileData, 0)
+ m.mu.Unlock()
+ m.mu.RLock()
+ } else {
+ return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound}
+ }
+ return nil
+}
+
+func (m *MemMapFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
+ fileInfo, err := m.Stat(name)
+ return fileInfo, false, err
+}
+
+func (m *MemMapFs) Stat(name string) (os.FileInfo, error) {
+ f, err := m.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ fi := mem.GetFileInfo(f.(*mem.File).Data())
+ return fi, nil
+}
+
+func (m *MemMapFs) Chmod(name string, mode os.FileMode) error {
+ mode &= chmodBits
+
+ m.mu.RLock()
+ f, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if !ok {
+ return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound}
+ }
+ prevOtherBits := mem.GetFileInfo(f).Mode() & ^chmodBits
+
+ mode = prevOtherBits | mode
+ return m.setFileMode(name, mode)
+}
+
+func (m *MemMapFs) setFileMode(name string, mode os.FileMode) error {
+ name = normalizePath(name)
+
+ m.mu.RLock()
+ f, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if !ok {
+ return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound}
+ }
+
+ m.mu.Lock()
+ mem.SetMode(f, mode)
+ m.mu.Unlock()
+
+ return nil
+}
+
+func (m *MemMapFs) Chown(name string, uid, gid int) error {
+ name = normalizePath(name)
+
+ m.mu.RLock()
+ f, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if !ok {
+ return &os.PathError{Op: "chown", Path: name, Err: ErrFileNotFound}
+ }
+
+ mem.SetUID(f, uid)
+ mem.SetGID(f, gid)
+
+ return nil
+}
+
+func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
+ name = normalizePath(name)
+
+ m.mu.RLock()
+ f, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if !ok {
+ return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound}
+ }
+
+ m.mu.Lock()
+ mem.SetModTime(f, mtime)
+ m.mu.Unlock()
+
+ return nil
+}
+
+func (m *MemMapFs) List() {
+ for _, x := range m.data {
+ y := mem.FileInfo{FileData: x}
+ fmt.Println(x.Name(), y.Size())
+ }
+}
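
Not part of the patch: a short sketch of MemMapFs as a drop-in, in-memory filesystem, handy for tests that would otherwise touch the real disk; the paths are illustrative.

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	// A thread-safe, in-memory filesystem backed by a map.
	fs := afero.NewMemMapFs()

	if err := fs.MkdirAll("/srv/data", 0755); err != nil {
		panic(err)
	}
	f, _ := fs.Create("/srv/data/hello.txt")
	f.WriteString("hello from memory")
	f.Close()

	info, _ := fs.Stat("/srv/data/hello.txt")
	fmt.Println(info.Name(), info.Size()) // hello.txt 17

	content, _ := afero.ReadFile(fs, "/srv/data/hello.txt")
	fmt.Println(string(content))
}
```
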
diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go
new file mode 100644
index 000000000..f1366321e
--- /dev/null
+++ b/vendor/github.com/spf13/afero/os.go
@@ -0,0 +1,113 @@
+// Copyright © 2014 Steve Francia .
+// Copyright 2013 tsuru authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "os"
+ "time"
+)
+
+var _ Lstater = (*OsFs)(nil)
+
+// OsFs is a Fs implementation that uses functions provided by the os package.
+//
+// For details in any method, check the documentation of the os package
+// (http://golang.org/pkg/os/).
+type OsFs struct{}
+
+func NewOsFs() Fs {
+ return &OsFs{}
+}
+
+func (OsFs) Name() string { return "OsFs" }
+
+func (OsFs) Create(name string) (File, error) {
+ f, e := os.Create(name)
+ if f == nil {
+ // while this looks strange, we need to return a bare nil (of type nil) not
+ // a nil value of type *os.File or nil won't be nil
+ return nil, e
+ }
+ return f, e
+}
+
+func (OsFs) Mkdir(name string, perm os.FileMode) error {
+ return os.Mkdir(name, perm)
+}
+
+func (OsFs) MkdirAll(path string, perm os.FileMode) error {
+ return os.MkdirAll(path, perm)
+}
+
+func (OsFs) Open(name string) (File, error) {
+ f, e := os.Open(name)
+ if f == nil {
+ // while this looks strange, we need to return a bare nil (of type nil) not
+ // a nil value of type *os.File or nil won't be nil
+ return nil, e
+ }
+ return f, e
+}
+
+func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ f, e := os.OpenFile(name, flag, perm)
+ if f == nil {
+ // while this looks strange, we need to return a bare nil (of type nil) not
+ // a nil value of type *os.File or nil won't be nil
+ return nil, e
+ }
+ return f, e
+}
+
+func (OsFs) Remove(name string) error {
+ return os.Remove(name)
+}
+
+func (OsFs) RemoveAll(path string) error {
+ return os.RemoveAll(path)
+}
+
+func (OsFs) Rename(oldname, newname string) error {
+ return os.Rename(oldname, newname)
+}
+
+func (OsFs) Stat(name string) (os.FileInfo, error) {
+ return os.Stat(name)
+}
+
+func (OsFs) Chmod(name string, mode os.FileMode) error {
+ return os.Chmod(name, mode)
+}
+
+func (OsFs) Chown(name string, uid, gid int) error {
+ return os.Chown(name, uid, gid)
+}
+
+func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
+ return os.Chtimes(name, atime, mtime)
+}
+
+func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
+ fi, err := os.Lstat(name)
+ return fi, true, err
+}
+
+func (OsFs) SymlinkIfPossible(oldname, newname string) error {
+ return os.Symlink(oldname, newname)
+}
+
+func (OsFs) ReadlinkIfPossible(name string) (string, error) {
+ return os.Readlink(name)
+}
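
Illustrative sketch, not from the patch: because OsFs and MemMapFs share the afero.Fs interface, code written against the interface runs on the real disk in production and in memory in tests; countEntries is a hypothetical helper.

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

// countEntries works against any backend because it only depends on afero.Fs.
func countEntries(fs afero.Fs, dir string) (int, error) {
	infos, err := afero.ReadDir(fs, dir)
	if err != nil {
		return 0, err
	}
	return len(infos), nil
}

func main() {
	// Production code uses the real disk; tests can pass afero.NewMemMapFs() instead.
	var appFs afero.Fs = afero.NewOsFs()
	n, err := countEntries(appFs, ".")
	fmt.Println(n, err)
}
```
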
diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go
new file mode 100644
index 000000000..18f60a0f6
--- /dev/null
+++ b/vendor/github.com/spf13/afero/path.go
@@ -0,0 +1,106 @@
+// Copyright ©2015 The Go Authors
+// Copyright ©2015 Steve Francia
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "os"
+ "path/filepath"
+ "sort"
+)
+
+// readDirNames reads the directory named by dirname and returns
+// a sorted list of directory entries.
+// adapted from https://golang.org/src/path/filepath/path.go
+func readDirNames(fs Fs, dirname string) ([]string, error) {
+ f, err := fs.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ names, err := f.Readdirnames(-1)
+ f.Close()
+ if err != nil {
+ return nil, err
+ }
+ sort.Strings(names)
+ return names, nil
+}
+
+// walk recursively descends path, calling walkFn
+// adapted from https://golang.org/src/path/filepath/path.go
+func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
+ err := walkFn(path, info, nil)
+ if err != nil {
+ if info.IsDir() && err == filepath.SkipDir {
+ return nil
+ }
+ return err
+ }
+
+ if !info.IsDir() {
+ return nil
+ }
+
+ names, err := readDirNames(fs, path)
+ if err != nil {
+ return walkFn(path, info, err)
+ }
+
+ for _, name := range names {
+ filename := filepath.Join(path, name)
+ fileInfo, err := lstatIfPossible(fs, filename)
+ if err != nil {
+ if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
+ return err
+ }
+ } else {
+ err = walk(fs, filename, fileInfo, walkFn)
+ if err != nil {
+ if !fileInfo.IsDir() || err != filepath.SkipDir {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// if the filesystem supports it, use Lstat, else use fs.Stat
+func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) {
+ if lfs, ok := fs.(Lstater); ok {
+ fi, _, err := lfs.LstatIfPossible(path)
+ return fi, err
+ }
+ return fs.Stat(path)
+}
+
+// Walk walks the file tree rooted at root, calling walkFn for each file or
+// directory in the tree, including root. All errors that arise visiting files
+// and directories are filtered by walkFn. The files are walked in lexical
+// order, which makes the output deterministic but means that for very
+// large directories Walk can be inefficient.
+// Walk does not follow symbolic links.
+
+func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error {
+ return Walk(a.Fs, root, walkFn)
+}
+
+func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error {
+ info, err := lstatIfPossible(fs, root)
+ if err != nil {
+ return walkFn(root, nil, err)
+ }
+ return walk(fs, root, info, walkFn)
+}
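
Sketch only (not part of the patch) of Walk over an in-memory tree; the /web paths are made up.

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	afero.WriteFile(fs, "/web/template/index.tmpl", []byte("{{.}}"), 0644)
	afero.WriteFile(fs, "/web/assets/app.css", []byte("body{}"), 0644)

	// Walk visits every file and directory under root in lexical order;
	// returning filepath.SkipDir from the callback prunes a directory.
	err := afero.Walk(fs, "/web", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path, info.IsDir())
		return nil
	})
	if err != nil {
		panic(err)
	}
}
```
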
diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go
new file mode 100644
index 000000000..bd8f9264d
--- /dev/null
+++ b/vendor/github.com/spf13/afero/readonlyfs.go
@@ -0,0 +1,96 @@
+package afero
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+var _ Lstater = (*ReadOnlyFs)(nil)
+
+type ReadOnlyFs struct {
+ source Fs
+}
+
+func NewReadOnlyFs(source Fs) Fs {
+ return &ReadOnlyFs{source: source}
+}
+
+func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) {
+ return ReadDir(r.source, name)
+}
+
+func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Chown(n string, uid, gid int) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Name() string {
+ return "ReadOnlyFilter"
+}
+
+func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) {
+ return r.source.Stat(name)
+}
+
+func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
+ if lsf, ok := r.source.(Lstater); ok {
+ return lsf.LstatIfPossible(name)
+ }
+ fi, err := r.Stat(name)
+ return fi, false, err
+}
+
+func (r *ReadOnlyFs) SymlinkIfPossible(oldname, newname string) error {
+ return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink}
+}
+
+func (r *ReadOnlyFs) ReadlinkIfPossible(name string) (string, error) {
+ if srdr, ok := r.source.(LinkReader); ok {
+ return srdr.ReadlinkIfPossible(name)
+ }
+
+ return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink}
+}
+
+func (r *ReadOnlyFs) Rename(o, n string) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) RemoveAll(p string) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Remove(n string) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
+ return nil, syscall.EPERM
+ }
+ return r.source.OpenFile(name, flag, perm)
+}
+
+func (r *ReadOnlyFs) Open(n string) (File, error) {
+ return r.source.Open(n)
+}
+
+func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Create(n string) (File, error) {
+ return nil, syscall.EPERM
+}
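
Not part of the patch: a sketch showing that ReadOnlyFs passes reads through to its source and rejects every mutating call with EPERM; the config path is illustrative.

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	backing := afero.NewMemMapFs()
	afero.WriteFile(backing, "/etc/app.conf", []byte("debug=false"), 0644)

	// Reads pass through; every mutating call is rejected.
	ro := afero.NewReadOnlyFs(backing)

	data, _ := afero.ReadFile(ro, "/etc/app.conf")
	fmt.Println(string(data))

	err := ro.Remove("/etc/app.conf")
	fmt.Println(err) // operation not permitted (EPERM)
}
```
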
diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go
new file mode 100644
index 000000000..ac359c62a
--- /dev/null
+++ b/vendor/github.com/spf13/afero/regexpfs.go
@@ -0,0 +1,224 @@
+package afero
+
+import (
+ "os"
+ "regexp"
+ "syscall"
+ "time"
+)
+
+// The RegexpFs filters files (not directories) by regular expression. Only
+// files matching the given regexp will be allowed; all others get an ENOENT
+// error ("No such file or directory").
+//
+type RegexpFs struct {
+ re *regexp.Regexp
+ source Fs
+}
+
+func NewRegexpFs(source Fs, re *regexp.Regexp) Fs {
+ return &RegexpFs{source: source, re: re}
+}
+
+type RegexpFile struct {
+ f File
+ re *regexp.Regexp
+}
+
+func (r *RegexpFs) matchesName(name string) error {
+ if r.re == nil {
+ return nil
+ }
+ if r.re.MatchString(name) {
+ return nil
+ }
+ return syscall.ENOENT
+}
+
+func (r *RegexpFs) dirOrMatches(name string) error {
+ dir, err := IsDir(r.source, name)
+ if err != nil {
+ return err
+ }
+ if dir {
+ return nil
+ }
+ return r.matchesName(name)
+}
+
+func (r *RegexpFs) Chtimes(name string, a, m time.Time) error {
+ if err := r.dirOrMatches(name); err != nil {
+ return err
+ }
+ return r.source.Chtimes(name, a, m)
+}
+
+func (r *RegexpFs) Chmod(name string, mode os.FileMode) error {
+ if err := r.dirOrMatches(name); err != nil {
+ return err
+ }
+ return r.source.Chmod(name, mode)
+}
+
+func (r *RegexpFs) Chown(name string, uid, gid int) error {
+ if err := r.dirOrMatches(name); err != nil {
+ return err
+ }
+ return r.source.Chown(name, uid, gid)
+}
+
+func (r *RegexpFs) Name() string {
+ return "RegexpFs"
+}
+
+func (r *RegexpFs) Stat(name string) (os.FileInfo, error) {
+ if err := r.dirOrMatches(name); err != nil {
+ return nil, err
+ }
+ return r.source.Stat(name)
+}
+
+func (r *RegexpFs) Rename(oldname, newname string) error {
+ dir, err := IsDir(r.source, oldname)
+ if err != nil {
+ return err
+ }
+ if dir {
+ return nil
+ }
+ if err := r.matchesName(oldname); err != nil {
+ return err
+ }
+ if err := r.matchesName(newname); err != nil {
+ return err
+ }
+ return r.source.Rename(oldname, newname)
+}
+
+func (r *RegexpFs) RemoveAll(p string) error {
+ dir, err := IsDir(r.source, p)
+ if err != nil {
+ return err
+ }
+ if !dir {
+ if err := r.matchesName(p); err != nil {
+ return err
+ }
+ }
+ return r.source.RemoveAll(p)
+}
+
+func (r *RegexpFs) Remove(name string) error {
+ if err := r.dirOrMatches(name); err != nil {
+ return err
+ }
+ return r.source.Remove(name)
+}
+
+func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ if err := r.dirOrMatches(name); err != nil {
+ return nil, err
+ }
+ return r.source.OpenFile(name, flag, perm)
+}
+
+func (r *RegexpFs) Open(name string) (File, error) {
+ dir, err := IsDir(r.source, name)
+ if err != nil {
+ return nil, err
+ }
+ if !dir {
+ if err := r.matchesName(name); err != nil {
+ return nil, err
+ }
+ }
+ f, err := r.source.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ return &RegexpFile{f: f, re: r.re}, nil
+}
+
+func (r *RegexpFs) Mkdir(n string, p os.FileMode) error {
+ return r.source.Mkdir(n, p)
+}
+
+func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error {
+ return r.source.MkdirAll(n, p)
+}
+
+func (r *RegexpFs) Create(name string) (File, error) {
+ if err := r.matchesName(name); err != nil {
+ return nil, err
+ }
+ return r.source.Create(name)
+}
+
+func (f *RegexpFile) Close() error {
+ return f.f.Close()
+}
+
+func (f *RegexpFile) Read(s []byte) (int, error) {
+ return f.f.Read(s)
+}
+
+func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) {
+ return f.f.ReadAt(s, o)
+}
+
+func (f *RegexpFile) Seek(o int64, w int) (int64, error) {
+ return f.f.Seek(o, w)
+}
+
+func (f *RegexpFile) Write(s []byte) (int, error) {
+ return f.f.Write(s)
+}
+
+func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) {
+ return f.f.WriteAt(s, o)
+}
+
+func (f *RegexpFile) Name() string {
+ return f.f.Name()
+}
+
+func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) {
+ var rfi []os.FileInfo
+ rfi, err = f.f.Readdir(c)
+ if err != nil {
+ return nil, err
+ }
+ for _, i := range rfi {
+ if i.IsDir() || f.re.MatchString(i.Name()) {
+ fi = append(fi, i)
+ }
+ }
+ return fi, nil
+}
+
+func (f *RegexpFile) Readdirnames(c int) (n []string, err error) {
+ fi, err := f.Readdir(c)
+ if err != nil {
+ return nil, err
+ }
+ for _, s := range fi {
+ n = append(n, s.Name())
+ }
+ return n, nil
+}
+
+func (f *RegexpFile) Stat() (os.FileInfo, error) {
+ return f.f.Stat()
+}
+
+func (f *RegexpFile) Sync() error {
+ return f.f.Sync()
+}
+
+func (f *RegexpFile) Truncate(s int64) error {
+ return f.f.Truncate(s)
+}
+
+func (f *RegexpFile) WriteString(s string) (int, error) {
+ return f.f.WriteString(s)
+}
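
Illustrative sketch (not in the patch) of RegexpFs hiding non-matching files; the media paths and the `\.png$` expression are made up.

```go
package main

import (
	"fmt"
	"regexp"

	"github.com/spf13/afero"
)

func main() {
	backing := afero.NewMemMapFs()
	afero.WriteFile(backing, "/media/avatar.png", []byte{0x89}, 0644)
	afero.WriteFile(backing, "/media/notes.txt", []byte("hi"), 0644)

	// Only file names matching the expression are visible; everything else
	// behaves as if it does not exist (ENOENT).
	pngOnly := afero.NewRegexpFs(backing, regexp.MustCompile(`\.png$`))

	_, errPng := pngOnly.Stat("/media/avatar.png")
	_, errTxt := pngOnly.Stat("/media/notes.txt")
	fmt.Println(errPng) // <nil>
	fmt.Println(errTxt) // no such file or directory
}
```
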
diff --git a/vendor/github.com/spf13/afero/symlink.go b/vendor/github.com/spf13/afero/symlink.go
new file mode 100644
index 000000000..d1c6ea53d
--- /dev/null
+++ b/vendor/github.com/spf13/afero/symlink.go
@@ -0,0 +1,55 @@
+// Copyright © 2018 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "errors"
+)
+
+// Symlinker is an optional interface in Afero. It is only implemented by the
+// filesystems saying so.
+// It indicates support for 3 symlink related interfaces that implement the
+// behaviors of the os methods:
+// - Lstat
+// - Symlink, and
+// - Readlink
+type Symlinker interface {
+ Lstater
+ Linker
+ LinkReader
+}
+
+// Linker is an optional interface in Afero. It is only implemented by the
+// filesystems saying so.
+// It will call Symlink if the filesystem itself is, or it delegates to, the os filesystem,
+// or the filesystem otherwise supports Symlinks.
+type Linker interface {
+ SymlinkIfPossible(oldname, newname string) error
+}
+
+// ErrNoSymlink is the error that will be wrapped in an os.LinkError if a file system
+// does not support Symlinks either directly or through its delegated filesystem.
+// As expressed by support for the Linker interface.
+var ErrNoSymlink = errors.New("symlink not supported")
+
+// LinkReader is an optional interface in Afero. It is only implemented by the
+// filesystems saying so.
+type LinkReader interface {
+ ReadlinkIfPossible(name string) (string, error)
+}
+
+// ErrNoReadlink is the error that will be wrapped in an os.PathError if a file system
+// does not support the readlink operation either directly or through its delegated filesystem.
+// As expressed by support for the LinkReader interface.
+var ErrNoReadlink = errors.New("readlink not supported")
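
A hedged sketch, not part of the patch, of probing for the optional Linker and LinkReader interfaces rather than assuming symlink support; the /tmp paths are placeholders.

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	var fs afero.Fs = afero.NewOsFs()

	// Symlink support is optional; probe for it instead of assuming it.
	if linker, ok := fs.(afero.Linker); ok {
		if err := linker.SymlinkIfPossible("/tmp/target", "/tmp/link"); err != nil {
			fmt.Println("symlink failed:", err)
		}
	} else {
		fmt.Println("backend does not support symlinks:", afero.ErrNoSymlink)
	}

	if reader, ok := fs.(afero.LinkReader); ok {
		dest, err := reader.ReadlinkIfPossible("/tmp/link")
		fmt.Println(dest, err)
	}
}
```
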
diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go
new file mode 100644
index 000000000..985363eea
--- /dev/null
+++ b/vendor/github.com/spf13/afero/unionFile.go
@@ -0,0 +1,317 @@
+package afero
+
+import (
+ "io"
+ "os"
+ "path/filepath"
+ "syscall"
+)
+
+// The UnionFile implements the afero.File interface and will be returned
+// when reading a directory present at least in the overlay or opening a file
+// for writing.
+//
+// Calls to Readdir() and Readdirnames() merge the file os.FileInfo / names
+// from the base and the overlay - for files present in both layers, only
+// those from the overlay will be used.
+//
+// When opening files for writing (Create() / OpenFile() with the right flags)
+// the operations will be done in both layers, starting with the overlay. A
+// successful read in the overlay will move the cursor position in the base layer
+// by the number of bytes read.
+type UnionFile struct {
+ Base File
+ Layer File
+ Merger DirsMerger
+ off int
+ files []os.FileInfo
+}
+
+func (f *UnionFile) Close() error {
+ // first close base, so we have a newer timestamp in the overlay. If we'd close
+ // the overlay first, we'd get a cacheStale the next time we access this file
+ // -> cache would be useless ;-)
+ if f.Base != nil {
+ f.Base.Close()
+ }
+ if f.Layer != nil {
+ return f.Layer.Close()
+ }
+ return BADFD
+}
+
+func (f *UnionFile) Read(s []byte) (int, error) {
+ if f.Layer != nil {
+ n, err := f.Layer.Read(s)
+ if (err == nil || err == io.EOF) && f.Base != nil {
+ // advance the file position also in the base file, the next
+ // call may be a write at this position (or a seek with SEEK_CUR)
+ if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil {
+ // only overwrite err in case the seek fails: we need to
+ // report an eventual io.EOF to the caller
+ err = seekErr
+ }
+ }
+ return n, err
+ }
+ if f.Base != nil {
+ return f.Base.Read(s)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) {
+ if f.Layer != nil {
+ n, err := f.Layer.ReadAt(s, o)
+ if (err == nil || err == io.EOF) && f.Base != nil {
+ _, err = f.Base.Seek(o+int64(n), os.SEEK_SET)
+ }
+ return n, err
+ }
+ if f.Base != nil {
+ return f.Base.ReadAt(s, o)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) {
+ if f.Layer != nil {
+ pos, err = f.Layer.Seek(o, w)
+ if (err == nil || err == io.EOF) && f.Base != nil {
+ _, err = f.Base.Seek(o, w)
+ }
+ return pos, err
+ }
+ if f.Base != nil {
+ return f.Base.Seek(o, w)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) Write(s []byte) (n int, err error) {
+ if f.Layer != nil {
+ n, err = f.Layer.Write(s)
+ if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark?
+ _, err = f.Base.Write(s)
+ }
+ return n, err
+ }
+ if f.Base != nil {
+ return f.Base.Write(s)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) {
+ if f.Layer != nil {
+ n, err = f.Layer.WriteAt(s, o)
+ if err == nil && f.Base != nil {
+ _, err = f.Base.WriteAt(s, o)
+ }
+ return n, err
+ }
+ if f.Base != nil {
+ return f.Base.WriteAt(s, o)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) Name() string {
+ if f.Layer != nil {
+ return f.Layer.Name()
+ }
+ return f.Base.Name()
+}
+
+// DirsMerger is how UnionFile weaves two directories together.
+// It takes the FileInfo slices from the layer and the base and returns a
+// single view.
+type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error)
+
+var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) {
+ var files = make(map[string]os.FileInfo)
+
+ for _, fi := range lofi {
+ files[fi.Name()] = fi
+ }
+
+ for _, fi := range bofi {
+ if _, exists := files[fi.Name()]; !exists {
+ files[fi.Name()] = fi
+ }
+ }
+
+ rfi := make([]os.FileInfo, len(files))
+
+ i := 0
+ for _, fi := range files {
+ rfi[i] = fi
+ i++
+ }
+
+ return rfi, nil
+
+}
+
+// Readdir will weave the two directories together and
+// return a single view of the overlayed directories.
+// At the end of the directory view, the error is io.EOF if c > 0.
+func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) {
+ var merge DirsMerger = f.Merger
+ if merge == nil {
+ merge = defaultUnionMergeDirsFn
+ }
+
+ if f.off == 0 {
+ var lfi []os.FileInfo
+ if f.Layer != nil {
+ lfi, err = f.Layer.Readdir(-1)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var bfi []os.FileInfo
+ if f.Base != nil {
+ bfi, err = f.Base.Readdir(-1)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ merged, err := merge(lfi, bfi)
+ if err != nil {
+ return nil, err
+ }
+ f.files = append(f.files, merged...)
+ }
+ files := f.files[f.off:]
+
+ if c <= 0 {
+ return files, nil
+ }
+
+ if len(files) == 0 {
+ return nil, io.EOF
+ }
+
+ if c > len(files) {
+ c = len(files)
+ }
+
+ defer func() { f.off += c }()
+ return files[:c], nil
+}
+
+func (f *UnionFile) Readdirnames(c int) ([]string, error) {
+ rfi, err := f.Readdir(c)
+ if err != nil {
+ return nil, err
+ }
+ var names []string
+ for _, fi := range rfi {
+ names = append(names, fi.Name())
+ }
+ return names, nil
+}
+
+func (f *UnionFile) Stat() (os.FileInfo, error) {
+ if f.Layer != nil {
+ return f.Layer.Stat()
+ }
+ if f.Base != nil {
+ return f.Base.Stat()
+ }
+ return nil, BADFD
+}
+
+func (f *UnionFile) Sync() (err error) {
+ if f.Layer != nil {
+ err = f.Layer.Sync()
+ if err == nil && f.Base != nil {
+ err = f.Base.Sync()
+ }
+ return err
+ }
+ if f.Base != nil {
+ return f.Base.Sync()
+ }
+ return BADFD
+}
+
+func (f *UnionFile) Truncate(s int64) (err error) {
+ if f.Layer != nil {
+ err = f.Layer.Truncate(s)
+ if err == nil && f.Base != nil {
+ err = f.Base.Truncate(s)
+ }
+ return err
+ }
+ if f.Base != nil {
+ return f.Base.Truncate(s)
+ }
+ return BADFD
+}
+
+func (f *UnionFile) WriteString(s string) (n int, err error) {
+ if f.Layer != nil {
+ n, err = f.Layer.WriteString(s)
+ if err == nil && f.Base != nil {
+ _, err = f.Base.WriteString(s)
+ }
+ return n, err
+ }
+ if f.Base != nil {
+ return f.Base.WriteString(s)
+ }
+ return 0, BADFD
+}
+
+func copyToLayer(base Fs, layer Fs, name string) error {
+ bfh, err := base.Open(name)
+ if err != nil {
+ return err
+ }
+ defer bfh.Close()
+
+ // First make sure the directory exists
+ exists, err := Exists(layer, filepath.Dir(name))
+ if err != nil {
+ return err
+ }
+ if !exists {
+ err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME?
+ if err != nil {
+ return err
+ }
+ }
+
+ // Create the file on the overlay
+ lfh, err := layer.Create(name)
+ if err != nil {
+ return err
+ }
+ n, err := io.Copy(lfh, bfh)
+ if err != nil {
+ // If anything fails, clean up the file
+ layer.Remove(name)
+ lfh.Close()
+ return err
+ }
+
+ bfi, err := bfh.Stat()
+ if err != nil || bfi.Size() != n {
+ layer.Remove(name)
+ lfh.Close()
+ return syscall.EIO
+ }
+
+ err = lfh.Close()
+ if err != nil {
+ layer.Remove(name)
+ lfh.Close()
+ return err
+ }
+ return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime())
+}
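The `DirsMerger` hook above controls how `Readdir` weaves the overlay and base listings together. The following is a hedged sketch of a custom merger that, like `defaultUnionMergeDirsFn`, prefers overlay entries but additionally hides dotfiles; wiring it into a `UnionFile` (normally done by a copy-on-write filesystem elsewhere in the package) is assumed.

```go
package example

import (
	"os"
	"strings"

	"github.com/spf13/afero"
)

// skipDotfilesMerger merges overlay and base listings, letting overlay
// entries win and dropping any name that starts with a dot.
var skipDotfilesMerger afero.DirsMerger = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) {
	seen := make(map[string]os.FileInfo, len(lofi)+len(bofi))
	for _, fi := range lofi { // overlay entries take precedence
		seen[fi.Name()] = fi
	}
	for _, fi := range bofi { // base entries fill in the rest
		if _, ok := seen[fi.Name()]; !ok {
			seen[fi.Name()] = fi
		}
	}
	merged := make([]os.FileInfo, 0, len(seen))
	for name, fi := range seen {
		if strings.HasPrefix(name, ".") {
			continue // hide dotfiles from the merged view
		}
		merged = append(merged, fi)
	}
	return merged, nil
}

// A UnionFile produced elsewhere could then be configured with:
//	uf.Merger = skipDotfilesMerger
```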
diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go
new file mode 100644
index 000000000..4f253f481
--- /dev/null
+++ b/vendor/github.com/spf13/afero/util.go
@@ -0,0 +1,330 @@
+// Copyright ©2015 Steve Francia
+// Portions Copyright ©2015 The Hugo Authors
+// Portions Copyright 2016-present Bjørn Erik Pedersen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "unicode"
+
+ "golang.org/x/text/transform"
+ "golang.org/x/text/unicode/norm"
+)
+
+// FilePathSeparator is the filepath separator defined by filepath.Separator.
+const FilePathSeparator = string(filepath.Separator)
+
+// WriteReader takes a reader and a path and writes the content.
+func (a Afero) WriteReader(path string, r io.Reader) (err error) {
+ return WriteReader(a.Fs, path, r)
+}
+
+func WriteReader(fs Fs, path string, r io.Reader) (err error) {
+ dir, _ := filepath.Split(path)
+ ospath := filepath.FromSlash(dir)
+
+ if ospath != "" {
+ err = fs.MkdirAll(ospath, 0777) // rwx, rw, r
+ if err != nil {
+ if err != os.ErrExist {
+ return err
+ }
+ }
+ }
+
+ file, err := fs.Create(path)
+ if err != nil {
+ return
+ }
+ defer file.Close()
+
+ _, err = io.Copy(file, r)
+ return
+}
+
+// SafeWriteReader is the same as WriteReader, but it returns an error if the file/directory already exists.
+func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) {
+ return SafeWriteReader(a.Fs, path, r)
+}
+
+func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) {
+ dir, _ := filepath.Split(path)
+ ospath := filepath.FromSlash(dir)
+
+ if ospath != "" {
+ err = fs.MkdirAll(ospath, 0777) // rwx, rw, r
+ if err != nil {
+ return
+ }
+ }
+
+ exists, err := Exists(fs, path)
+ if err != nil {
+ return
+ }
+ if exists {
+ return fmt.Errorf("%v already exists", path)
+ }
+
+ file, err := fs.Create(path)
+ if err != nil {
+ return
+ }
+ defer file.Close()
+
+ _, err = io.Copy(file, r)
+ return
+}
+
+func (a Afero) GetTempDir(subPath string) string {
+ return GetTempDir(a.Fs, subPath)
+}
+
+// GetTempDir returns the default temp directory with a trailing slash.
+// If subPath is not empty, it will be created recursively with mode 777 (rwx rwx rwx).
+func GetTempDir(fs Fs, subPath string) string {
+ addSlash := func(p string) string {
+ if FilePathSeparator != p[len(p)-1:] {
+ p = p + FilePathSeparator
+ }
+ return p
+ }
+ dir := addSlash(os.TempDir())
+
+ if subPath != "" {
+ // preserve windows backslash :-(
+ if FilePathSeparator == "\\" {
+ subPath = strings.Replace(subPath, "\\", "____", -1)
+ }
+ dir = dir + UnicodeSanitize((subPath))
+ if FilePathSeparator == "\\" {
+ dir = strings.Replace(dir, "____", "\\", -1)
+ }
+
+ if exists, _ := Exists(fs, dir); exists {
+ return addSlash(dir)
+ }
+
+ err := fs.MkdirAll(dir, 0777)
+ if err != nil {
+ panic(err)
+ }
+ dir = addSlash(dir)
+ }
+ return dir
+}
+
+// UnicodeSanitize rewrites a string to remove non-standard path characters.
+func UnicodeSanitize(s string) string {
+ source := []rune(s)
+ target := make([]rune, 0, len(source))
+
+ for _, r := range source {
+ if unicode.IsLetter(r) ||
+ unicode.IsDigit(r) ||
+ unicode.IsMark(r) ||
+ r == '.' ||
+ r == '/' ||
+ r == '\\' ||
+ r == '_' ||
+ r == '-' ||
+ r == '%' ||
+ r == ' ' ||
+ r == '#' {
+ target = append(target, r)
+ }
+ }
+
+ return string(target)
+}
+
+// NeuterAccents transforms characters with accents into their plain forms.
+func NeuterAccents(s string) string {
+ t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC)
+ result, _, _ := transform.String(t, string(s))
+
+ return result
+}
+
+func isMn(r rune) bool {
+ return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks
+}
+
+func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) {
+ return FileContainsBytes(a.Fs, filename, subslice)
+}
+
+// Check if a file contains a specified byte slice.
+func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) {
+ f, err := fs.Open(filename)
+ if err != nil {
+ return false, err
+ }
+ defer f.Close()
+
+ return readerContainsAny(f, subslice), nil
+}
+
+func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) {
+ return FileContainsAnyBytes(a.Fs, filename, subslices)
+}
+
+// Check if a file contains any of the specified byte slices.
+func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) {
+ f, err := fs.Open(filename)
+ if err != nil {
+ return false, err
+ }
+ defer f.Close()
+
+ return readerContainsAny(f, subslices...), nil
+}
+
+// readerContainsAny reports whether any of the subslices is within r.
+func readerContainsAny(r io.Reader, subslices ...[]byte) bool {
+
+ if r == nil || len(subslices) == 0 {
+ return false
+ }
+
+ largestSlice := 0
+
+ for _, sl := range subslices {
+ if len(sl) > largestSlice {
+ largestSlice = len(sl)
+ }
+ }
+
+ if largestSlice == 0 {
+ return false
+ }
+
+ bufflen := largestSlice * 4
+ halflen := bufflen / 2
+ buff := make([]byte, bufflen)
+ var err error
+ var n, i int
+
+ for {
+ i++
+ if i == 1 {
+ n, err = io.ReadAtLeast(r, buff[:halflen], halflen)
+ } else {
+ if i != 2 {
+ // shift left to catch overlapping matches
+ copy(buff[:], buff[halflen:])
+ }
+ n, err = io.ReadAtLeast(r, buff[halflen:], halflen)
+ }
+
+ if n > 0 {
+ for _, sl := range subslices {
+ if bytes.Contains(buff, sl) {
+ return true
+ }
+ }
+ }
+
+ if err != nil {
+ break
+ }
+ }
+ return false
+}
+
+func (a Afero) DirExists(path string) (bool, error) {
+ return DirExists(a.Fs, path)
+}
+
+// DirExists checks if a path exists and is a directory.
+func DirExists(fs Fs, path string) (bool, error) {
+ fi, err := fs.Stat(path)
+ if err == nil && fi.IsDir() {
+ return true, nil
+ }
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+}
+
+func (a Afero) IsDir(path string) (bool, error) {
+ return IsDir(a.Fs, path)
+}
+
+// IsDir checks if a given path is a directory.
+func IsDir(fs Fs, path string) (bool, error) {
+ fi, err := fs.Stat(path)
+ if err != nil {
+ return false, err
+ }
+ return fi.IsDir(), nil
+}
+
+func (a Afero) IsEmpty(path string) (bool, error) {
+ return IsEmpty(a.Fs, path)
+}
+
+// IsEmpty checks if a given file or directory is empty.
+func IsEmpty(fs Fs, path string) (bool, error) {
+ if b, _ := Exists(fs, path); !b {
+ return false, fmt.Errorf("%q path does not exist", path)
+ }
+ fi, err := fs.Stat(path)
+ if err != nil {
+ return false, err
+ }
+ if fi.IsDir() {
+ f, err := fs.Open(path)
+ if err != nil {
+ return false, err
+ }
+ defer f.Close()
+ list, err := f.Readdir(-1)
+ return len(list) == 0, err
+ }
+ return fi.Size() == 0, nil
+}
+
+func (a Afero) Exists(path string) (bool, error) {
+ return Exists(a.Fs, path)
+}
+
+// Check if a file or directory exists.
+func Exists(fs Fs, path string) (bool, error) {
+ _, err := fs.Stat(path)
+ if err == nil {
+ return true, nil
+ }
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+}
+
+func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string {
+ combinedPath := filepath.Join(basePathFs.path, relativePath)
+ if parent, ok := basePathFs.source.(*BasePathFs); ok {
+ return FullBaseFsPath(parent, combinedPath)
+ }
+
+ return combinedPath
+}
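As a quick illustration of the helpers in util.go, here is a hedged sketch run against an in-memory filesystem; the `Afero` wrapper struct and the `NewMemMapFs` constructor live in other files of the package and are assumed here.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/afero"
)

func main() {
	appFS := &afero.Afero{Fs: afero.NewMemMapFs()} // assumed from the rest of the package

	// WriteReader creates missing parent directories before writing the content.
	if err := appFS.WriteReader("notes/todo.txt", strings.NewReader("buy milk")); err != nil {
		panic(err)
	}

	exists, _ := appFS.Exists("notes/todo.txt")
	isDir, _ := appFS.IsDir("notes")
	found, _ := appFS.FileContainsBytes("notes/todo.txt", []byte("milk"))

	fmt.Println(exists, isDir, found) // true true true
}
```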
diff --git a/vendor/github.com/spf13/cast/.gitignore b/vendor/github.com/spf13/cast/.gitignore
new file mode 100644
index 000000000..53053a8ac
--- /dev/null
+++ b/vendor/github.com/spf13/cast/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+
+*.bench
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md b/vendor/github.com/spf13/cast/LICENSE
similarity index 96%
rename from vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md
rename to vendor/github.com/spf13/cast/LICENSE
index 1cade6cef..4527efb9c 100644
--- a/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md
+++ b/vendor/github.com/spf13/cast/LICENSE
@@ -1,6 +1,6 @@
The MIT License (MIT)
-Copyright (c) 2014 Brian Goff
+Copyright (c) 2014 Steve Francia
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -18,4 +18,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/spf13/cast/Makefile b/vendor/github.com/spf13/cast/Makefile
new file mode 100644
index 000000000..f01a5dbb6
--- /dev/null
+++ b/vendor/github.com/spf13/cast/Makefile
@@ -0,0 +1,40 @@
+GOVERSION := $(shell go version | cut -d ' ' -f 3 | cut -d '.' -f 2)
+
+.PHONY: check fmt lint test test-race vet test-cover-html help
+.DEFAULT_GOAL := help
+
+check: test-race fmt vet lint ## Run tests and linters
+
+test: ## Run tests
+ go test ./...
+
+test-race: ## Run tests with race detector
+ go test -race ./...
+
+fmt: ## Run gofmt linter
+ifeq "$(GOVERSION)" "12"
+ @for d in `go list` ; do \
+ if [ "`gofmt -l -s $$GOPATH/src/$$d | tee /dev/stderr`" ]; then \
+ echo "^ improperly formatted go files" && echo && exit 1; \
+ fi \
+ done
+endif
+
+lint: ## Run golint linter
+ @for d in `go list` ; do \
+ if [ "`golint $$d | tee /dev/stderr`" ]; then \
+ echo "^ golint errors!" && echo && exit 1; \
+ fi \
+ done
+
+vet: ## Run go vet linter
+ @if [ "`go vet | tee /dev/stderr`" ]; then \
+ echo "^ go vet errors!" && echo && exit 1; \
+ fi
+
+test-cover-html: ## Generate test coverage report
+ go test -coverprofile=coverage.out -covermode=count
+ go tool cover -func=coverage.out
+
+help:
+ @grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
diff --git a/vendor/github.com/spf13/cast/README.md b/vendor/github.com/spf13/cast/README.md
new file mode 100644
index 000000000..120a57342
--- /dev/null
+++ b/vendor/github.com/spf13/cast/README.md
@@ -0,0 +1,75 @@
+cast
+====
+[![GoDoc](https://godoc.org/github.com/spf13/cast?status.svg)](https://godoc.org/github.com/spf13/cast)
+[![Build Status](https://github.com/spf13/cast/actions/workflows/go.yml/badge.svg)](https://github.com/spf13/cast/actions/workflows/go.yml)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast)](https://goreportcard.com/report/github.com/spf13/cast)
+
+Easy and safe casting from one type to another in Go
+
+Don’t Panic! ... Cast
+
+## What is Cast?
+
+Cast is a library to convert between different Go types in a consistent and easy way.
+
+Cast provides simple functions to easily convert a number to a string, an
+interface into a bool, etc. Cast does this intelligently when an obvious
+conversion is possible. It doesn’t make any attempt to guess what you meant;
+for example, you can only convert a string to an int when it is a string
+representation of an int, such as “8”. Cast was developed for use in
+[Hugo](http://hugo.spf13.com), a website engine which uses YAML, TOML or JSON
+for metadata.
+
+## Why use Cast?
+
+When working with dynamic data in Go you often need to cast or convert the data
+from one type into another. Cast goes beyond just using type assertion (though
+it uses that when possible) to provide a very straightforward and convenient
+library.
+
+If you are working with interfaces to handle things like dynamic content
+you’ll need an easy way to convert an interface into a given type. This
+is the library for you.
+
+If you are taking in data from YAML, TOML or JSON or other formats which lack
+full types, then Cast is the library for you.
+
+## Usage
+
+Cast provides a handful of To_____ methods. These methods will always return
+the desired type. **If input is provided that will not convert to that type, the
+0 or nil value for that type will be returned**.
+
+Cast also provides identical methods To_____E. These return the same result as
+the To_____ methods, plus an additional error which tells you if it successfully
+converted. Using these methods you can tell the difference between when the
+input matched the zero value or when the conversion failed and the zero value
+was returned.
+
+The following examples are merely a sample of what is available. Please review
+the code for a complete set.
+
+### Example ‘ToString’:
+
+ cast.ToString("mayonegg") // "mayonegg"
+ cast.ToString(8) // "8"
+ cast.ToString(8.31) // "8.31"
+ cast.ToString([]byte("one time")) // "one time"
+ cast.ToString(nil) // ""
+
+ var foo interface{} = "one more time"
+ cast.ToString(foo) // "one more time"
+
+
+### Example ‘ToInt’:
+
+ cast.ToInt(8) // 8
+ cast.ToInt(8.31) // 8
+ cast.ToInt("8") // 8
+ cast.ToInt(true) // 1
+ cast.ToInt(false) // 0
+
+ var eight interface{} = 8
+ cast.ToInt(eight) // 8
+ cast.ToInt(nil) // 0
+
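To make the To_____ / To_____E distinction described above concrete, here is a small hedged sketch of the error-returning variant; it only uses `cast.ToIntE`, which is defined in this package.

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// Successful conversion: the error is nil.
	if n, err := cast.ToIntE("8"); err == nil {
		fmt.Println(n) // 8
	}

	// Failed conversion: the E variant reports it instead of silently returning 0.
	if _, err := cast.ToIntE("not a number"); err != nil {
		fmt.Println(err) // unable to cast "not a number" of type string to int
	}
}
```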
diff --git a/vendor/github.com/spf13/cast/cast.go b/vendor/github.com/spf13/cast/cast.go
new file mode 100644
index 000000000..0cfe9418d
--- /dev/null
+++ b/vendor/github.com/spf13/cast/cast.go
@@ -0,0 +1,176 @@
+// Copyright © 2014 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Package cast provides easy and safe casting in Go.
+package cast
+
+import "time"
+
+// ToBool casts an interface to a bool type.
+func ToBool(i interface{}) bool {
+ v, _ := ToBoolE(i)
+ return v
+}
+
+// ToTime casts an interface to a time.Time type.
+func ToTime(i interface{}) time.Time {
+ v, _ := ToTimeE(i)
+ return v
+}
+
+// ToTimeInDefaultLocation casts an interface to a time.Time type,
+// interpreting inputs without a timezone to be in the given location,
+// or the local timezone if nil.
+func ToTimeInDefaultLocation(i interface{}, location *time.Location) time.Time {
+ v, _ := ToTimeInDefaultLocationE(i, location)
+ return v
+}
+
+// ToDuration casts an interface to a time.Duration type.
+func ToDuration(i interface{}) time.Duration {
+ v, _ := ToDurationE(i)
+ return v
+}
+
+// ToFloat64 casts an interface to a float64 type.
+func ToFloat64(i interface{}) float64 {
+ v, _ := ToFloat64E(i)
+ return v
+}
+
+// ToFloat32 casts an interface to a float32 type.
+func ToFloat32(i interface{}) float32 {
+ v, _ := ToFloat32E(i)
+ return v
+}
+
+// ToInt64 casts an interface to an int64 type.
+func ToInt64(i interface{}) int64 {
+ v, _ := ToInt64E(i)
+ return v
+}
+
+// ToInt32 casts an interface to an int32 type.
+func ToInt32(i interface{}) int32 {
+ v, _ := ToInt32E(i)
+ return v
+}
+
+// ToInt16 casts an interface to an int16 type.
+func ToInt16(i interface{}) int16 {
+ v, _ := ToInt16E(i)
+ return v
+}
+
+// ToInt8 casts an interface to an int8 type.
+func ToInt8(i interface{}) int8 {
+ v, _ := ToInt8E(i)
+ return v
+}
+
+// ToInt casts an interface to an int type.
+func ToInt(i interface{}) int {
+ v, _ := ToIntE(i)
+ return v
+}
+
+// ToUint casts an interface to a uint type.
+func ToUint(i interface{}) uint {
+ v, _ := ToUintE(i)
+ return v
+}
+
+// ToUint64 casts an interface to a uint64 type.
+func ToUint64(i interface{}) uint64 {
+ v, _ := ToUint64E(i)
+ return v
+}
+
+// ToUint32 casts an interface to a uint32 type.
+func ToUint32(i interface{}) uint32 {
+ v, _ := ToUint32E(i)
+ return v
+}
+
+// ToUint16 casts an interface to a uint16 type.
+func ToUint16(i interface{}) uint16 {
+ v, _ := ToUint16E(i)
+ return v
+}
+
+// ToUint8 casts an interface to a uint8 type.
+func ToUint8(i interface{}) uint8 {
+ v, _ := ToUint8E(i)
+ return v
+}
+
+// ToString casts an interface to a string type.
+func ToString(i interface{}) string {
+ v, _ := ToStringE(i)
+ return v
+}
+
+// ToStringMapString casts an interface to a map[string]string type.
+func ToStringMapString(i interface{}) map[string]string {
+ v, _ := ToStringMapStringE(i)
+ return v
+}
+
+// ToStringMapStringSlice casts an interface to a map[string][]string type.
+func ToStringMapStringSlice(i interface{}) map[string][]string {
+ v, _ := ToStringMapStringSliceE(i)
+ return v
+}
+
+// ToStringMapBool casts an interface to a map[string]bool type.
+func ToStringMapBool(i interface{}) map[string]bool {
+ v, _ := ToStringMapBoolE(i)
+ return v
+}
+
+// ToStringMapInt casts an interface to a map[string]int type.
+func ToStringMapInt(i interface{}) map[string]int {
+ v, _ := ToStringMapIntE(i)
+ return v
+}
+
+// ToStringMapInt64 casts an interface to a map[string]int64 type.
+func ToStringMapInt64(i interface{}) map[string]int64 {
+ v, _ := ToStringMapInt64E(i)
+ return v
+}
+
+// ToStringMap casts an interface to a map[string]interface{} type.
+func ToStringMap(i interface{}) map[string]interface{} {
+ v, _ := ToStringMapE(i)
+ return v
+}
+
+// ToSlice casts an interface to a []interface{} type.
+func ToSlice(i interface{}) []interface{} {
+ v, _ := ToSliceE(i)
+ return v
+}
+
+// ToBoolSlice casts an interface to a []bool type.
+func ToBoolSlice(i interface{}) []bool {
+ v, _ := ToBoolSliceE(i)
+ return v
+}
+
+// ToStringSlice casts an interface to a []string type.
+func ToStringSlice(i interface{}) []string {
+ v, _ := ToStringSliceE(i)
+ return v
+}
+
+// ToIntSlice casts an interface to a []int type.
+func ToIntSlice(i interface{}) []int {
+ v, _ := ToIntSliceE(i)
+ return v
+}
+
+// ToDurationSlice casts an interface to a []time.Duration type.
+func ToDurationSlice(i interface{}) []time.Duration {
+ v, _ := ToDurationSliceE(i)
+ return v
+}
diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go
new file mode 100644
index 000000000..c04af6a97
--- /dev/null
+++ b/vendor/github.com/spf13/cast/caste.go
@@ -0,0 +1,1337 @@
+// Copyright © 2014 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package cast
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "html/template"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var errNegativeNotAllowed = errors.New("unable to cast negative value")
+
+// ToTimeE casts an interface to a time.Time type.
+func ToTimeE(i interface{}) (tim time.Time, err error) {
+ return ToTimeInDefaultLocationE(i, time.UTC)
+}
+
+// ToTimeInDefaultLocationE casts an empty interface to time.Time,
+// interpreting inputs without a timezone to be in the given location,
+// or the local timezone if nil.
+func ToTimeInDefaultLocationE(i interface{}, location *time.Location) (tim time.Time, err error) {
+ i = indirect(i)
+
+ switch v := i.(type) {
+ case time.Time:
+ return v, nil
+ case string:
+ return StringToDateInDefaultLocation(v, location)
+ case int:
+ return time.Unix(int64(v), 0), nil
+ case int64:
+ return time.Unix(v, 0), nil
+ case int32:
+ return time.Unix(int64(v), 0), nil
+ case uint:
+ return time.Unix(int64(v), 0), nil
+ case uint64:
+ return time.Unix(int64(v), 0), nil
+ case uint32:
+ return time.Unix(int64(v), 0), nil
+ default:
+ return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i)
+ }
+}
+
+// ToDurationE casts an interface to a time.Duration type.
+func ToDurationE(i interface{}) (d time.Duration, err error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case time.Duration:
+ return s, nil
+ case int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8:
+ d = time.Duration(ToInt64(s))
+ return
+ case float32, float64:
+ d = time.Duration(ToFloat64(s))
+ return
+ case string:
+ if strings.ContainsAny(s, "nsuµmh") {
+ d, err = time.ParseDuration(s)
+ } else {
+ d, err = time.ParseDuration(s + "ns")
+ }
+ return
+ default:
+ err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i)
+ return
+ }
+}
+
+// ToBoolE casts an interface to a bool type.
+func ToBoolE(i interface{}) (bool, error) {
+ i = indirect(i)
+
+ switch b := i.(type) {
+ case bool:
+ return b, nil
+ case nil:
+ return false, nil
+ case int:
+ if i.(int) != 0 {
+ return true, nil
+ }
+ return false, nil
+ case string:
+ return strconv.ParseBool(i.(string))
+ default:
+ return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i)
+ }
+}
+
+// ToFloat64E casts an interface to a float64 type.
+func ToFloat64E(i interface{}) (float64, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case float64:
+ return s, nil
+ case float32:
+ return float64(s), nil
+ case int:
+ return float64(s), nil
+ case int64:
+ return float64(s), nil
+ case int32:
+ return float64(s), nil
+ case int16:
+ return float64(s), nil
+ case int8:
+ return float64(s), nil
+ case uint:
+ return float64(s), nil
+ case uint64:
+ return float64(s), nil
+ case uint32:
+ return float64(s), nil
+ case uint16:
+ return float64(s), nil
+ case uint8:
+ return float64(s), nil
+ case string:
+ v, err := strconv.ParseFloat(s, 64)
+ if err == nil {
+ return v, nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
+ }
+}
+
+// ToFloat32E casts an interface to a float32 type.
+func ToFloat32E(i interface{}) (float32, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case float64:
+ return float32(s), nil
+ case float32:
+ return s, nil
+ case int:
+ return float32(s), nil
+ case int64:
+ return float32(s), nil
+ case int32:
+ return float32(s), nil
+ case int16:
+ return float32(s), nil
+ case int8:
+ return float32(s), nil
+ case uint:
+ return float32(s), nil
+ case uint64:
+ return float32(s), nil
+ case uint32:
+ return float32(s), nil
+ case uint16:
+ return float32(s), nil
+ case uint8:
+ return float32(s), nil
+ case string:
+ v, err := strconv.ParseFloat(s, 32)
+ if err == nil {
+ return float32(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
+ }
+}
+
+// ToInt64E casts an interface to an int64 type.
+func ToInt64E(i interface{}) (int64, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case int:
+ return int64(s), nil
+ case int64:
+ return s, nil
+ case int32:
+ return int64(s), nil
+ case int16:
+ return int64(s), nil
+ case int8:
+ return int64(s), nil
+ case uint:
+ return int64(s), nil
+ case uint64:
+ return int64(s), nil
+ case uint32:
+ return int64(s), nil
+ case uint16:
+ return int64(s), nil
+ case uint8:
+ return int64(s), nil
+ case float64:
+ return int64(s), nil
+ case float32:
+ return int64(s), nil
+ case string:
+ v, err := strconv.ParseInt(s, 0, 0)
+ if err == nil {
+ return v, nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i)
+ }
+}
+
+// ToInt32E casts an interface to an int32 type.
+func ToInt32E(i interface{}) (int32, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case int:
+ return int32(s), nil
+ case int64:
+ return int32(s), nil
+ case int32:
+ return s, nil
+ case int16:
+ return int32(s), nil
+ case int8:
+ return int32(s), nil
+ case uint:
+ return int32(s), nil
+ case uint64:
+ return int32(s), nil
+ case uint32:
+ return int32(s), nil
+ case uint16:
+ return int32(s), nil
+ case uint8:
+ return int32(s), nil
+ case float64:
+ return int32(s), nil
+ case float32:
+ return int32(s), nil
+ case string:
+ v, err := strconv.ParseInt(s, 0, 0)
+ if err == nil {
+ return int32(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i)
+ }
+}
+
+// ToInt16E casts an interface to an int16 type.
+func ToInt16E(i interface{}) (int16, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case int:
+ return int16(s), nil
+ case int64:
+ return int16(s), nil
+ case int32:
+ return int16(s), nil
+ case int16:
+ return s, nil
+ case int8:
+ return int16(s), nil
+ case uint:
+ return int16(s), nil
+ case uint64:
+ return int16(s), nil
+ case uint32:
+ return int16(s), nil
+ case uint16:
+ return int16(s), nil
+ case uint8:
+ return int16(s), nil
+ case float64:
+ return int16(s), nil
+ case float32:
+ return int16(s), nil
+ case string:
+ v, err := strconv.ParseInt(s, 0, 0)
+ if err == nil {
+ return int16(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i)
+ }
+}
+
+// ToInt8E casts an interface to an int8 type.
+func ToInt8E(i interface{}) (int8, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case int:
+ return int8(s), nil
+ case int64:
+ return int8(s), nil
+ case int32:
+ return int8(s), nil
+ case int16:
+ return int8(s), nil
+ case int8:
+ return s, nil
+ case uint:
+ return int8(s), nil
+ case uint64:
+ return int8(s), nil
+ case uint32:
+ return int8(s), nil
+ case uint16:
+ return int8(s), nil
+ case uint8:
+ return int8(s), nil
+ case float64:
+ return int8(s), nil
+ case float32:
+ return int8(s), nil
+ case string:
+ v, err := strconv.ParseInt(s, 0, 0)
+ if err == nil {
+ return int8(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i)
+ }
+}
+
+// ToIntE casts an interface to an int type.
+func ToIntE(i interface{}) (int, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case int:
+ return s, nil
+ case int64:
+ return int(s), nil
+ case int32:
+ return int(s), nil
+ case int16:
+ return int(s), nil
+ case int8:
+ return int(s), nil
+ case uint:
+ return int(s), nil
+ case uint64:
+ return int(s), nil
+ case uint32:
+ return int(s), nil
+ case uint16:
+ return int(s), nil
+ case uint8:
+ return int(s), nil
+ case float64:
+ return int(s), nil
+ case float32:
+ return int(s), nil
+ case string:
+ v, err := strconv.ParseInt(s, 0, 0)
+ if err == nil {
+ return int(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i)
+ }
+}
+
+// ToUintE casts an interface to a uint type.
+func ToUintE(i interface{}) (uint, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 0)
+ if err == nil {
+ return uint(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case uint:
+ return s, nil
+ case uint64:
+ return uint(s), nil
+ case uint32:
+ return uint(s), nil
+ case uint16:
+ return uint(s), nil
+ case uint8:
+ return uint(s), nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i)
+ }
+}
+
+// ToUint64E casts an interface to a uint64 type.
+func ToUint64E(i interface{}) (uint64, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 64)
+ if err == nil {
+ return v, nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint64: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case uint:
+ return uint64(s), nil
+ case uint64:
+ return s, nil
+ case uint32:
+ return uint64(s), nil
+ case uint16:
+ return uint64(s), nil
+ case uint8:
+ return uint64(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i)
+ }
+}
+
+// ToUint32E casts an interface to a uint32 type.
+func ToUint32E(i interface{}) (uint32, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 32)
+ if err == nil {
+ return uint32(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint32: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case uint:
+ return uint32(s), nil
+ case uint64:
+ return uint32(s), nil
+ case uint32:
+ return s, nil
+ case uint16:
+ return uint32(s), nil
+ case uint8:
+ return uint32(s), nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i)
+ }
+}
+
+// ToUint16E casts an interface to a uint16 type.
+func ToUint16E(i interface{}) (uint16, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 16)
+ if err == nil {
+ return uint16(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint16: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case uint:
+ return uint16(s), nil
+ case uint64:
+ return uint16(s), nil
+ case uint32:
+ return uint16(s), nil
+ case uint16:
+ return s, nil
+ case uint8:
+ return uint16(s), nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i)
+ }
+}
+
+// ToUint8E casts an interface to a uint8 type.
+func ToUint8E(i interface{}) (uint8, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 8)
+ if err == nil {
+ return uint8(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint8: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case uint:
+ return uint8(s), nil
+ case uint64:
+ return uint8(s), nil
+ case uint32:
+ return uint8(s), nil
+ case uint16:
+ return uint8(s), nil
+ case uint8:
+ return s, nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i)
+ }
+}
+
+// From html/template/content.go
+// Copyright 2011 The Go Authors. All rights reserved.
+// indirect returns the value, after dereferencing as many times
+// as necessary to reach the base type (or nil).
+func indirect(a interface{}) interface{} {
+ if a == nil {
+ return nil
+ }
+ if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr {
+ // Avoid creating a reflect.Value if it's not a pointer.
+ return a
+ }
+ v := reflect.ValueOf(a)
+ for v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v.Interface()
+}
+
+// From html/template/content.go
+// Copyright 2011 The Go Authors. All rights reserved.
+// indirectToStringerOrError returns the value, after dereferencing as many times
+// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer
+// or error.
+func indirectToStringerOrError(a interface{}) interface{} {
+ if a == nil {
+ return nil
+ }
+
+ var errorType = reflect.TypeOf((*error)(nil)).Elem()
+ var fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
+
+ v := reflect.ValueOf(a)
+ for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v.Interface()
+}
+
+// ToStringE casts an interface to a string type.
+func ToStringE(i interface{}) (string, error) {
+ i = indirectToStringerOrError(i)
+
+ switch s := i.(type) {
+ case string:
+ return s, nil
+ case bool:
+ return strconv.FormatBool(s), nil
+ case float64:
+ return strconv.FormatFloat(s, 'f', -1, 64), nil
+ case float32:
+ return strconv.FormatFloat(float64(s), 'f', -1, 32), nil
+ case int:
+ return strconv.Itoa(s), nil
+ case int64:
+ return strconv.FormatInt(s, 10), nil
+ case int32:
+ return strconv.Itoa(int(s)), nil
+ case int16:
+ return strconv.FormatInt(int64(s), 10), nil
+ case int8:
+ return strconv.FormatInt(int64(s), 10), nil
+ case uint:
+ return strconv.FormatUint(uint64(s), 10), nil
+ case uint64:
+ return strconv.FormatUint(uint64(s), 10), nil
+ case uint32:
+ return strconv.FormatUint(uint64(s), 10), nil
+ case uint16:
+ return strconv.FormatUint(uint64(s), 10), nil
+ case uint8:
+ return strconv.FormatUint(uint64(s), 10), nil
+ case []byte:
+ return string(s), nil
+ case template.HTML:
+ return string(s), nil
+ case template.URL:
+ return string(s), nil
+ case template.JS:
+ return string(s), nil
+ case template.CSS:
+ return string(s), nil
+ case template.HTMLAttr:
+ return string(s), nil
+ case nil:
+ return "", nil
+ case fmt.Stringer:
+ return s.String(), nil
+ case error:
+ return s.Error(), nil
+ default:
+ return "", fmt.Errorf("unable to cast %#v of type %T to string", i, i)
+ }
+}
+
+// ToStringMapStringE casts an interface to a map[string]string type.
+func ToStringMapStringE(i interface{}) (map[string]string, error) {
+ var m = map[string]string{}
+
+ switch v := i.(type) {
+ case map[string]string:
+ return v, nil
+ case map[string]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToString(val)
+ }
+ return m, nil
+ case map[interface{}]string:
+ for k, val := range v {
+ m[ToString(k)] = ToString(val)
+ }
+ return m, nil
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToString(val)
+ }
+ return m, nil
+ case string:
+ err := jsonStringToObject(v, &m)
+ return m, err
+ default:
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]string", i, i)
+ }
+}
+
+// ToStringMapStringSliceE casts an interface to a map[string][]string type.
+func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) {
+ var m = map[string][]string{}
+
+ switch v := i.(type) {
+ case map[string][]string:
+ return v, nil
+ case map[string][]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToStringSlice(val)
+ }
+ return m, nil
+ case map[string]string:
+ for k, val := range v {
+ m[ToString(k)] = []string{val}
+ }
+ case map[string]interface{}:
+ for k, val := range v {
+ switch vt := val.(type) {
+ case []interface{}:
+ m[ToString(k)] = ToStringSlice(vt)
+ case []string:
+ m[ToString(k)] = vt
+ default:
+ m[ToString(k)] = []string{ToString(val)}
+ }
+ }
+ return m, nil
+ case map[interface{}][]string:
+ for k, val := range v {
+ m[ToString(k)] = ToStringSlice(val)
+ }
+ return m, nil
+ case map[interface{}]string:
+ for k, val := range v {
+ m[ToString(k)] = ToStringSlice(val)
+ }
+ return m, nil
+ case map[interface{}][]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToStringSlice(val)
+ }
+ return m, nil
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ key, err := ToStringE(k)
+ if err != nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
+ }
+ value, err := ToStringSliceE(val)
+ if err != nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
+ }
+ m[key] = value
+ }
+ case string:
+ err := jsonStringToObject(v, &m)
+ return m, err
+ default:
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
+ }
+ return m, nil
+}
+
+// ToStringMapBoolE casts an interface to a map[string]bool type.
+func ToStringMapBoolE(i interface{}) (map[string]bool, error) {
+ var m = map[string]bool{}
+
+ switch v := i.(type) {
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToBool(val)
+ }
+ return m, nil
+ case map[string]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToBool(val)
+ }
+ return m, nil
+ case map[string]bool:
+ return v, nil
+ case string:
+ err := jsonStringToObject(v, &m)
+ return m, err
+ default:
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]bool", i, i)
+ }
+}
+
+// ToStringMapE casts an interface to a map[string]interface{} type.
+func ToStringMapE(i interface{}) (map[string]interface{}, error) {
+ var m = map[string]interface{}{}
+
+ switch v := i.(type) {
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = val
+ }
+ return m, nil
+ case map[string]interface{}:
+ return v, nil
+ case string:
+ err := jsonStringToObject(v, &m)
+ return m, err
+ default:
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]interface{}", i, i)
+ }
+}
+
+// ToStringMapIntE casts an interface to a map[string]int type.
+func ToStringMapIntE(i interface{}) (map[string]int, error) {
+ var m = map[string]int{}
+ if i == nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i)
+ }
+
+ switch v := i.(type) {
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToInt(val)
+ }
+ return m, nil
+ case map[string]interface{}:
+ for k, val := range v {
+ m[k] = ToInt(val)
+ }
+ return m, nil
+ case map[string]int:
+ return v, nil
+ case string:
+ err := jsonStringToObject(v, &m)
+ return m, err
+ }
+
+ if reflect.TypeOf(i).Kind() != reflect.Map {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i)
+ }
+
+ mVal := reflect.ValueOf(m)
+ v := reflect.ValueOf(i)
+ for _, keyVal := range v.MapKeys() {
+ val, err := ToIntE(v.MapIndex(keyVal).Interface())
+ if err != nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i)
+ }
+ mVal.SetMapIndex(keyVal, reflect.ValueOf(val))
+ }
+ return m, nil
+}
+
+// ToStringMapInt64E casts an interface to a map[string]int64 type.
+func ToStringMapInt64E(i interface{}) (map[string]int64, error) {
+ var m = map[string]int64{}
+ if i == nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i)
+ }
+
+ switch v := i.(type) {
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToInt64(val)
+ }
+ return m, nil
+ case map[string]interface{}:
+ for k, val := range v {
+ m[k] = ToInt64(val)
+ }
+ return m, nil
+ case map[string]int64:
+ return v, nil
+ case string:
+ err := jsonStringToObject(v, &m)
+ return m, err
+ }
+
+ if reflect.TypeOf(i).Kind() != reflect.Map {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i)
+ }
+ mVal := reflect.ValueOf(m)
+ v := reflect.ValueOf(i)
+ for _, keyVal := range v.MapKeys() {
+ val, err := ToInt64E(v.MapIndex(keyVal).Interface())
+ if err != nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i)
+ }
+ mVal.SetMapIndex(keyVal, reflect.ValueOf(val))
+ }
+ return m, nil
+}
+
+// ToSliceE casts an interface to a []interface{} type.
+func ToSliceE(i interface{}) ([]interface{}, error) {
+ var s []interface{}
+
+ switch v := i.(type) {
+ case []interface{}:
+ return append(s, v...), nil
+ case []map[string]interface{}:
+ for _, u := range v {
+ s = append(s, u)
+ }
+ return s, nil
+ default:
+ return s, fmt.Errorf("unable to cast %#v of type %T to []interface{}", i, i)
+ }
+}
+
+// ToBoolSliceE casts an interface to a []bool type.
+func ToBoolSliceE(i interface{}) ([]bool, error) {
+ if i == nil {
+ return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
+ }
+
+ switch v := i.(type) {
+ case []bool:
+ return v, nil
+ }
+
+ kind := reflect.TypeOf(i).Kind()
+ switch kind {
+ case reflect.Slice, reflect.Array:
+ s := reflect.ValueOf(i)
+ a := make([]bool, s.Len())
+ for j := 0; j < s.Len(); j++ {
+ val, err := ToBoolE(s.Index(j).Interface())
+ if err != nil {
+ return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
+ }
+ a[j] = val
+ }
+ return a, nil
+ default:
+ return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
+ }
+}
+
+// ToStringSliceE casts an interface to a []string type.
+func ToStringSliceE(i interface{}) ([]string, error) {
+ var a []string
+
+ switch v := i.(type) {
+ case []interface{}:
+ for _, u := range v {
+ a = append(a, ToString(u))
+ }
+ return a, nil
+ case []string:
+ return v, nil
+ case []int8:
+ for _, u := range v {
+ a = append(a, ToString(u))
+ }
+ return a, nil
+ case []int:
+ for _, u := range v {
+ a = append(a, ToString(u))
+ }
+ return a, nil
+ case []int32:
+ for _, u := range v {
+ a = append(a, ToString(u))
+ }
+ return a, nil
+ case []int64:
+ for _, u := range v {
+ a = append(a, ToString(u))
+ }
+ return a, nil
+ case []float32:
+ for _, u := range v {
+ a = append(a, ToString(u))
+ }
+ return a, nil
+ case []float64:
+ for _, u := range v {
+ a = append(a, ToString(u))
+ }
+ return a, nil
+ case string:
+ return strings.Fields(v), nil
+ case []error:
+ for _, err := range i.([]error) {
+ a = append(a, err.Error())
+ }
+ return a, nil
+ case interface{}:
+ str, err := ToStringE(v)
+ if err != nil {
+ return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i)
+ }
+ return []string{str}, nil
+ default:
+ return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i)
+ }
+}
+
+// ToIntSliceE casts an interface to a []int type.
+func ToIntSliceE(i interface{}) ([]int, error) {
+ if i == nil {
+ return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
+ }
+
+ switch v := i.(type) {
+ case []int:
+ return v, nil
+ }
+
+ kind := reflect.TypeOf(i).Kind()
+ switch kind {
+ case reflect.Slice, reflect.Array:
+ s := reflect.ValueOf(i)
+ a := make([]int, s.Len())
+ for j := 0; j < s.Len(); j++ {
+ val, err := ToIntE(s.Index(j).Interface())
+ if err != nil {
+ return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
+ }
+ a[j] = val
+ }
+ return a, nil
+ default:
+ return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
+ }
+}
+
+// ToDurationSliceE casts an interface to a []time.Duration type.
+func ToDurationSliceE(i interface{}) ([]time.Duration, error) {
+ if i == nil {
+ return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i)
+ }
+
+ switch v := i.(type) {
+ case []time.Duration:
+ return v, nil
+ }
+
+ kind := reflect.TypeOf(i).Kind()
+ switch kind {
+ case reflect.Slice, reflect.Array:
+ s := reflect.ValueOf(i)
+ a := make([]time.Duration, s.Len())
+ for j := 0; j < s.Len(); j++ {
+ val, err := ToDurationE(s.Index(j).Interface())
+ if err != nil {
+ return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i)
+ }
+ a[j] = val
+ }
+ return a, nil
+ default:
+ return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i)
+ }
+}
+
+// StringToDate attempts to parse a string into a time.Time type using a
+// predefined list of formats. If no suitable format is found, an error is
+// returned.
+func StringToDate(s string) (time.Time, error) {
+ return parseDateWith(s, time.UTC, timeFormats)
+}
+
+// StringToDateInDefaultLocation parses a string into a time.Time,
+// interpreting inputs without a timezone to be in the given location,
+// or the local timezone if nil.
+func StringToDateInDefaultLocation(s string, location *time.Location) (time.Time, error) {
+ return parseDateWith(s, location, timeFormats)
+}
+
+type timeFormatType int
+
+const (
+ timeFormatNoTimezone timeFormatType = iota
+ timeFormatNamedTimezone
+ timeFormatNumericTimezone
+ timeFormatNumericAndNamedTimezone
+ timeFormatTimeOnly
+)
+
+type timeFormat struct {
+ format string
+ typ timeFormatType
+}
+
+func (f timeFormat) hasTimezone() bool {
+ // We don't include the formats with only named timezones, see
+ // https://github.com/golang/go/issues/19694#issuecomment-289103522
+ return f.typ >= timeFormatNumericTimezone && f.typ <= timeFormatNumericAndNamedTimezone
+}
+
+var (
+ timeFormats = []timeFormat{
+ timeFormat{time.RFC3339, timeFormatNumericTimezone},
+ timeFormat{"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone
+ timeFormat{time.RFC1123Z, timeFormatNumericTimezone},
+ timeFormat{time.RFC1123, timeFormatNamedTimezone},
+ timeFormat{time.RFC822Z, timeFormatNumericTimezone},
+ timeFormat{time.RFC822, timeFormatNamedTimezone},
+ timeFormat{time.RFC850, timeFormatNamedTimezone},
+ timeFormat{"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String()
+ timeFormat{"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon
+ timeFormat{"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon
+ timeFormat{"2006-01-02 15:04:05", timeFormatNoTimezone},
+ timeFormat{time.ANSIC, timeFormatNoTimezone},
+ timeFormat{time.UnixDate, timeFormatNamedTimezone},
+ timeFormat{time.RubyDate, timeFormatNumericTimezone},
+ timeFormat{"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone},
+ timeFormat{"2006-01-02", timeFormatNoTimezone},
+ timeFormat{"02 Jan 2006", timeFormatNoTimezone},
+ timeFormat{"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone},
+ timeFormat{"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone},
+ timeFormat{time.Kitchen, timeFormatTimeOnly},
+ timeFormat{time.Stamp, timeFormatTimeOnly},
+ timeFormat{time.StampMilli, timeFormatTimeOnly},
+ timeFormat{time.StampMicro, timeFormatTimeOnly},
+ timeFormat{time.StampNano, timeFormatTimeOnly},
+ }
+)
+
+func parseDateWith(s string, location *time.Location, formats []timeFormat) (d time.Time, e error) {
+
+ for _, format := range formats {
+ if d, e = time.Parse(format.format, s); e == nil {
+
+ // Some time formats have a zone name, but no offset, so it gets
+ // put in that zone name (not the default one passed in to us), but
+ // without that zone's offset. So set the location manually.
+ if format.typ <= timeFormatNamedTimezone {
+ if location == nil {
+ location = time.Local
+ }
+ year, month, day := d.Date()
+ hour, min, sec := d.Clock()
+ d = time.Date(year, month, day, hour, min, sec, d.Nanosecond(), location)
+ }
+
+ return
+ }
+ }
+ return d, fmt.Errorf("unable to parse date: %s", s)
+}
+
+// jsonStringToObject attempts to unmarshal a string as JSON into
+// the object passed as pointer.
+func jsonStringToObject(s string, v interface{}) error {
+ data := []byte(s)
+ return json.Unmarshal(data, v)
+}
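A hedged sketch of the date-parsing entry points defined above; it relies only on the exported `StringToDate` and `StringToDateInDefaultLocation` functions and the `timeFormats` table shown earlier.

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/cast"
)

func main() {
	// An RFC3339 input carries its own offset, so no default location is needed.
	t1, err := cast.StringToDate("2021-12-07T13:31:39Z")
	if err != nil {
		panic(err)
	}

	// A layout without a timezone is interpreted in the supplied location.
	t2, err := cast.StringToDateInDefaultLocation("2021-12-07 13:31:39", time.UTC)
	if err != nil {
		panic(err)
	}

	fmt.Println(t1.Equal(t2)) // true
}
```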
diff --git a/vendor/github.com/spf13/cast/timeformattype_string.go b/vendor/github.com/spf13/cast/timeformattype_string.go
new file mode 100644
index 000000000..1524fc82c
--- /dev/null
+++ b/vendor/github.com/spf13/cast/timeformattype_string.go
@@ -0,0 +1,27 @@
+// Code generated by "stringer -type timeFormatType"; DO NOT EDIT.
+
+package cast
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[timeFormatNoTimezone-0]
+ _ = x[timeFormatNamedTimezone-1]
+ _ = x[timeFormatNumericTimezone-2]
+ _ = x[timeFormatNumericAndNamedTimezone-3]
+ _ = x[timeFormatTimeOnly-4]
+}
+
+const _timeFormatType_name = "timeFormatNoTimezonetimeFormatNamedTimezonetimeFormatNumericTimezonetimeFormatNumericAndNamedTimezonetimeFormatTimeOnly"
+
+var _timeFormatType_index = [...]uint8{0, 20, 43, 68, 101, 119}
+
+func (i timeFormatType) String() string {
+ if i < 0 || i >= timeFormatType(len(_timeFormatType_index)-1) {
+ return "timeFormatType(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _timeFormatType_name[_timeFormatType_index[i]:_timeFormatType_index[i+1]]
+}
diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore
new file mode 100644
index 000000000..c7b459e4d
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.gitignore
@@ -0,0 +1,39 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore
+# swap
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+# session
+Session.vim
+# temporary
+.netrwhist
+*~
+# auto-generated tag files
+tags
+
+*.exe
+cobra.test
+bin
+
+.idea/
+*.iml
diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml
new file mode 100644
index 000000000..0d6e61793
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.golangci.yml
@@ -0,0 +1,48 @@
+run:
+ deadline: 5m
+
+linters:
+ disable-all: true
+ enable:
+ #- bodyclose
+ - deadcode
+ #- depguard
+ #- dogsled
+ #- dupl
+ - errcheck
+ #- exhaustive
+ #- funlen
+ - gas
+ #- gochecknoinits
+ - goconst
+ #- gocritic
+ #- gocyclo
+ #- gofmt
+ - goimports
+ - golint
+ #- gomnd
+ #- goprintffuncname
+ #- gosec
+ #- gosimple
+ - govet
+ - ineffassign
+ - interfacer
+ #- lll
+ - maligned
+ - megacheck
+ #- misspell
+ #- nakedret
+ #- noctx
+ #- nolintlint
+ #- rowserrcheck
+ #- scopelint
+ #- staticcheck
+ - structcheck
+ #- stylecheck
+ #- typecheck
+ - unconvert
+ #- unparam
+ #- unused
+ - varcheck
+ #- whitespace
+ fast: false
diff --git a/vendor/github.com/spf13/cobra/.mailmap b/vendor/github.com/spf13/cobra/.mailmap
new file mode 100644
index 000000000..94ec53068
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.mailmap
@@ -0,0 +1,3 @@
+Steve Francia
+Bjørn Erik Pedersen
+Fabiano Franz
diff --git a/vendor/github.com/spf13/cobra/CHANGELOG.md b/vendor/github.com/spf13/cobra/CHANGELOG.md
new file mode 100644
index 000000000..8a23b4f85
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/CHANGELOG.md
@@ -0,0 +1,51 @@
+# Cobra Changelog
+
+## v1.1.3
+
+* **Fix:** release-branch.cobra1.1 only: Revert "Deprecate Go < 1.14" to maintain backward compatibility
+
+## v1.1.2
+
+### Notable Changes
+
+* Bump license year to 2021 in golden files (#1309) @Bowbaq
+* Enhance PowerShell completion with custom comp (#1208) @Luap99
+* Update gopkg.in/yaml.v2 to v2.4.0: The previous breaking change in yaml.v2 v2.3.0 has been reverted, see go-yaml/yaml#670
+* Documentation readability improvements (#1228 etc.) @zaataylor etc.
+* Use golangci-lint: Repair warnings and errors resulting from linting (#1044) @umarcor
+
+## v1.1.1
+
+* **Fix:** yaml.v2 2.3.0 contained an unintended breaking change. This release reverts to yaml.v2 v2.2.8, which has recent critical CVE fixes but does not have the breaking changes. See https://github.com/spf13/cobra/pull/1259 for context.
+* **Fix:** correct internal formatting for go-md2man v2 (which caused man page generation to be broken). See https://github.com/spf13/cobra/issues/1049 for context.
+
+## v1.1.0
+
+### Notable Changes
+
+* Extend Go completions and revamp zsh comp (#1070)
+* Fix man page doc generation - no auto generated tag when `cmd.DisableAutoGenTag = true` (#1104) @jpmcb
+* Add completion for help command (#1136)
+* Complete subcommands when TraverseChildren is set (#1171)
+* Fix stderr printing functions (#894)
+* fix: fish output redirection (#1247)
+
+## v1.0.0
+
+Announcing v1.0.0 of Cobra. 🎉
+
+### Notable Changes
+* Fish completion (including support for Go custom completion) @marckhouzam
+* API (urgent): Rename BashCompDirectives to ShellCompDirectives @marckhouzam
+* Remove/replace SetOutput on Command - deprecated @jpmcb
+* add support for autolabel stale PR @xchapter7x
+* Add Labeler Actions @xchapter7x
+* Custom completions coded in Go (instead of Bash) @marckhouzam
+* Partial Revert of #922 @jharshman
+* Add Makefile to project @jharshman
+* Correct documentation for InOrStdin @desponda
+* Apply formatting to templates @jharshman
+* Revert change so help is printed on stdout again @marckhouzam
+* Update md2man to v2.0.0 @pdf
+* update viper to v1.4.0 @umarcor
+* Update cmd/root.go example in README.md @jharshman
diff --git a/vendor/github.com/spf13/cobra/CONDUCT.md b/vendor/github.com/spf13/cobra/CONDUCT.md
new file mode 100644
index 000000000..9d16f88fd
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/CONDUCT.md
@@ -0,0 +1,37 @@
+## Cobra User Contract
+
+### Versioning
+Cobra will follow a steady release cadence. Non-breaking changes will be released as minor versions quarterly. Patch bug releases are at the discretion of the maintainers. Users can expect security patch fixes to be released within relatively short order of a CVE becoming known. For more information on security patch fixes, see the CVE section below. Releases will follow [Semantic Versioning](https://semver.org/). Users tracking the master branch should expect unpredictable breaking changes as the project continues to move forward. For stability, it is highly recommended to use a release.
+
+### Backward Compatibility
+We will maintain two major releases in a moving window. The N-1 release will only receive bug fixes and security updates and will be dropped once N+1 is released.
+
+### Deprecation
+Deprecation of Go versions or dependent packages will only occur in major releases. To reduce the chance of this taking users by surprise, any large deprecation will be preceded by an announcement in the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) and an issue on GitHub.
+
+### CVE
+Maintainers will make every effort to release security patches in the case of a medium to high severity CVE directly impacting the library. The speed with which these patches reach a release is at the discretion of the maintainers. A low severity CVE may be a lower priority than a high severity one.
+
+### Communication
+Cobra maintainers will use GitHub issues and the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) as the primary means of communication with the community. This is to foster open communication with all users and contributors.
+
+### Breaking Changes
+Breaking changes are generally allowed in the master branch, as this is the branch used to develop the next release of Cobra.
+
+There may be times, however, when master is closed for breaking changes. This is likely to happen as we near the release of a new version.
+
+Breaking changes are not allowed in release branches, as these represent minor versions that have already been released. These versions have consumers who expect the APIs, behaviors, etc. to remain stable during the lifetime of the patch stream for the minor release.
+
+Examples of breaking changes include:
+- Removing or renaming an exported constant, variable, type, or function.
+- Updating the version of critical libraries such as `spf13/pflag`, `spf13/viper` etc...
+ - Some version updates may be acceptable for picking up bug fixes, but maintainers must exercise caution when reviewing.
+
+There may, at times, need to be exceptions where breaking changes are allowed in release branches. These are at the discretion of the project's maintainers, and must be carefully considered before merging.
+
+### CI Testing
+Maintainers will ensure the Cobra test suite utilizes the currently supported versions of Go.
+
+### Disclaimer
+Changes to this document and the contents therein are at the discretion of the maintainers.
+None of the contents of this document are legally binding in any way to the maintainers or the users.
diff --git a/vendor/github.com/spf13/cobra/CONTRIBUTING.md b/vendor/github.com/spf13/cobra/CONTRIBUTING.md
new file mode 100644
index 000000000..6f356e6a8
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/CONTRIBUTING.md
@@ -0,0 +1,50 @@
+# Contributing to Cobra
+
+Thank you so much for contributing to Cobra. We appreciate your time and help.
+Here are some guidelines to help you get started.
+
+## Code of Conduct
+
+Be kind and respectful to the members of the community. Take time to educate
+others who are seeking help. Harassment of any kind will not be tolerated.
+
+## Questions
+
+If you have questions regarding Cobra, feel free to ask them in the community
+[#cobra Slack channel][cobra-slack].
+
+## Filing a bug or feature
+
+1. Before filing an issue, please check the existing issues to see if a
+ similar one was already opened. If there is one already opened, feel free
+ to comment on it.
+1. If you believe you've found a bug, please provide detailed steps of
+ reproduction, the version of Cobra and anything else you believe will be
+ useful to help troubleshoot it (e.g. OS environment, environment variables,
+ etc...). Also state the current behavior vs. the expected behavior.
+1. If you'd like to see a feature or an enhancement please open an issue with
+ a clear title and description of what the feature is and why it would be
+ beneficial to the project and its users.
+
+## Submitting changes
+
+1. CLA: Upon submitting a Pull Request (PR), contributors will be prompted to
+ sign a CLA. Please sign the CLA :slightly_smiling_face:
+1. Tests: If you are submitting code, please ensure you have adequate tests
+ for the feature. Tests can be run via `go test ./...` or `make test`.
+1. Since this is a Go project, ensure the new code is properly formatted for
+   code consistency. Run `make all`.
+
+### Quick steps to contribute
+
+1. Fork the project.
+1. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`)
+1. Create your feature branch (`git checkout -b my-new-feature`)
+1. Make changes and run tests (`make test`)
+1. Add them to staging (`git add .`)
+1. Commit your changes (`git commit -m 'Add some feature'`)
+1. Push to the branch (`git push origin my-new-feature`)
+1. Create new pull request
+
+
+[cobra-slack]: https://gophers.slack.com/archives/CD3LP1199
diff --git a/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt
new file mode 100644
index 000000000..298f0e266
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/LICENSE.txt
@@ -0,0 +1,174 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile
new file mode 100644
index 000000000..472c73bf1
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/Makefile
@@ -0,0 +1,40 @@
+BIN="./bin"
+SRC=$(shell find . -name "*.go")
+
+ifeq (, $(shell which golangci-lint))
+$(warning "could not find golangci-lint in $(PATH), run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh")
+endif
+
+ifeq (, $(shell which richgo))
+$(warning "could not find richgo in $(PATH), run: go get github.com/kyoh86/richgo")
+endif
+
+.PHONY: fmt lint test cobra_generator install_deps clean
+
+default: all
+
+all: fmt test cobra_generator
+
+fmt:
+ $(info ******************** checking formatting ********************)
+ @test -z $(shell gofmt -l $(SRC)) || (gofmt -d $(SRC); exit 1)
+
+lint:
+ $(info ******************** running lint tools ********************)
+ golangci-lint run -v
+
+test: install_deps lint
+ $(info ******************** running tests ********************)
+ richgo test -v ./...
+
+cobra_generator: install_deps
+ $(info ******************** building generator ********************)
+ mkdir -p $(BIN)
+ make -C cobra all
+
+install_deps:
+ $(info ******************** downloading dependencies ********************)
+ go get -v ./...
+
+clean:
+ rm -rf $(BIN)
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
new file mode 100644
index 000000000..074e3979f
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -0,0 +1,125 @@
+![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png)
+
+Cobra is both a library for creating powerful modern CLI applications and a program to generate applications and command files.
+
+Cobra is used in many Go projects such as [Kubernetes](http://kubernetes.io/),
+[Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to
+name a few. [This list](./projects_using_cobra.md) contains a more extensive set of projects using Cobra.
+
+[![](https://img.shields.io/github/workflow/status/spf13/cobra/Test?longCache=tru&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest)
+[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra)
+[![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199)
+
+# Table of Contents
+
+- [Overview](#overview)
+- [Concepts](#concepts)
+ * [Commands](#commands)
+ * [Flags](#flags)
+- [Installing](#installing)
+- [Usage](#usage)
+ * [Using the Cobra Generator](user_guide.md#using-the-cobra-generator)
+ * [Using the Cobra Library](user_guide.md#using-the-cobra-library)
+ * [Working with Flags](user_guide.md#working-with-flags)
+ * [Positional and Custom Arguments](user_guide.md#positional-and-custom-arguments)
+ * [Example](user_guide.md#example)
+ * [Help Command](user_guide.md#help-command)
+ * [Usage Message](user_guide.md#usage-message)
+ * [PreRun and PostRun Hooks](user_guide.md#prerun-and-postrun-hooks)
+ * [Suggestions when "unknown command" happens](user_guide.md#suggestions-when-unknown-command-happens)
+ * [Generating documentation for your command](user_guide.md#generating-documentation-for-your-command)
+ * [Generating shell completions](user_guide.md#generating-shell-completions)
+- [Contributing](CONTRIBUTING.md)
+- [License](#license)
+
+# Overview
+
+Cobra is a library providing a simple interface to create powerful modern CLI
+applications similar to the git & go tools.
+
+Cobra is also a program that generates application scaffolding so you can rapidly
+develop a Cobra-based application.
+
+Cobra provides:
+* Easy subcommand-based CLIs: `app server`, `app fetch`, etc.
+* Fully POSIX-compliant flags (including short & long versions)
+* Nested subcommands
+* Global, local and cascading flags
+* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname`
+* Intelligent suggestions (`app srver`... did you mean `app server`?)
+* Automatic help generation for commands and flags
+* Automatic help flag recognition of `-h`, `--help`, etc.
+* Automatically generated shell autocomplete for your application (bash, zsh, fish, powershell)
+* Automatically generated man pages for your application
+* Command aliases so you can change things without breaking them
+* The flexibility to define your own help, usage, etc.
+* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps
+
+# Concepts
+
+Cobra is built on a structure of commands, arguments & flags.
+
+**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions.
+
+The best applications read like sentences when used, and as a result, users
+intuitively know how to interact with them.
+
+The pattern to follow is
+`APPNAME VERB NOUN --ADJECTIVE.`
+ or
+`APPNAME COMMAND ARG --FLAG`
+
+A few good real world examples may better illustrate this point.
+
+In the following example, 'server' is a command, and 'port' is a flag:
+
+ hugo server --port=1313
+
+In this command we are telling Git to clone the URL as a bare repository.
+
+ git clone URL --bare
+
+## Commands
+
+Command is the central point of the application. Each interaction that
+the application supports will be contained in a Command. A command can
+have child commands and optionally run an action.
+
+In the example above, 'server' is the command.
+
+[More about cobra.Command](https://godoc.org/github.com/spf13/cobra#Command)
+
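+For example, a minimal sketch of defining a command and attaching it to a root
+command might look like this (the `app` and `greet` names and the printed
+message are placeholders):
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/spf13/cobra"
+)
+
+func main() {
+	rootCmd := &cobra.Command{Use: "app"}
+
+	// greetCmd is a child command; Run holds the action it performs.
+	greetCmd := &cobra.Command{
+		Use:   "greet",
+		Short: "Print a friendly greeting",
+		Run: func(cmd *cobra.Command, args []string) {
+			fmt.Println("Hello from cobra!")
+		},
+	}
+
+	rootCmd.AddCommand(greetCmd)
+	if err := rootCmd.Execute(); err != nil {
+		os.Exit(1)
+	}
+}
+```
+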
+## Flags
+
+A flag is a way to modify the behavior of a command. Cobra supports
+fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/).
+A Cobra command can define flags that persist through to child commands
+and flags that are only available to that command.
+
+In the example above, 'port' is the flag.
+
+Flag functionality is provided by the [pflag
+library](https://github.com/spf13/pflag), a fork of the flag standard library
+which maintains the same interface while adding POSIX compliance.
+
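+A minimal sketch of defining both kinds of flags might look like this (the
+command names, flag names, and defaults are placeholders):
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/spf13/cobra"
+)
+
+func main() {
+	var verbose bool
+
+	rootCmd := &cobra.Command{Use: "app"}
+	// A persistent flag is inherited by every child command of rootCmd.
+	rootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "enable verbose output")
+
+	serveCmd := &cobra.Command{
+		Use: "serve",
+		Run: func(cmd *cobra.Command, args []string) {
+			port, _ := cmd.Flags().GetInt("port")
+			fmt.Printf("serving on port %d (verbose=%v)\n", port, verbose)
+		},
+	}
+	// A local flag is only available on the command it is defined for.
+	serveCmd.Flags().Int("port", 8080, "port to listen on")
+
+	rootCmd.AddCommand(serveCmd)
+	if err := rootCmd.Execute(); err != nil {
+		os.Exit(1)
+	}
+}
+```
+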
+# Installing
+Using Cobra is easy. First, use `go get` to install the latest version
+of the library. This command will install the `cobra` generator executable
+along with the library and its dependencies:
+
+ go get -u github.com/spf13/cobra
+
+Next, include Cobra in your application:
+
+```go
+import "github.com/spf13/cobra"
+```
+
+# Usage
+
+See [User Guide](user_guide.md).
+
+# License
+
+Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt)
diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go
new file mode 100644
index 000000000..70e9b2629
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/args.go
@@ -0,0 +1,109 @@
+package cobra
+
+import (
+ "fmt"
+ "strings"
+)
+
+type PositionalArgs func(cmd *Command, args []string) error
+
+// Legacy arg validation has the following behaviour:
+// - root commands with no subcommands can take arbitrary arguments
+// - root commands with subcommands will do subcommand validity checking
+// - subcommands will always accept arbitrary arguments
+func legacyArgs(cmd *Command, args []string) error {
+ // no subcommand, always take args
+ if !cmd.HasSubCommands() {
+ return nil
+ }
+
+ // root command with subcommands, do subcommand checking.
+ if !cmd.HasParent() && len(args) > 0 {
+ return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0]))
+ }
+ return nil
+}
+
+// NoArgs returns an error if any args are included.
+func NoArgs(cmd *Command, args []string) error {
+ if len(args) > 0 {
+ return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath())
+ }
+ return nil
+}
+
+// OnlyValidArgs returns an error if any args are not in the list of ValidArgs.
+func OnlyValidArgs(cmd *Command, args []string) error {
+ if len(cmd.ValidArgs) > 0 {
+ // Remove any description that may be included in ValidArgs.
+		// A description follows a tab character.
+ var validArgs []string
+ for _, v := range cmd.ValidArgs {
+ validArgs = append(validArgs, strings.Split(v, "\t")[0])
+ }
+
+ for _, v := range args {
+ if !stringInSlice(v, validArgs) {
+ return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0]))
+ }
+ }
+ }
+ return nil
+}
+
+// ArbitraryArgs never returns an error.
+func ArbitraryArgs(cmd *Command, args []string) error {
+ return nil
+}
+
+// MinimumNArgs returns an error if there are not at least N args.
+func MinimumNArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) < n {
+ return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// MaximumNArgs returns an error if there are more than N args.
+func MaximumNArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) > n {
+ return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// ExactArgs returns an error if there are not exactly n args.
+func ExactArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) != n {
+ return fmt.Errorf("accepts %d arg(s), received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// ExactValidArgs returns an error if
+// there are not exactly N positional args OR
+// there are any positional args that are not in the `ValidArgs` field of `Command`
+func ExactValidArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if err := ExactArgs(n)(cmd, args); err != nil {
+ return err
+ }
+ return OnlyValidArgs(cmd, args)
+ }
+}
+
+// RangeArgs returns an error if the number of args is not within the expected range.
+func RangeArgs(min int, max int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) < min || len(args) > max {
+ return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args))
+ }
+ return nil
+ }
+}
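+
+// exampleArgsUsage is an illustrative sketch only (the "greet" command is
+// hypothetical): it shows how a command can wire one of the validators above
+// through its Args field to require exactly one positional argument.
+func exampleArgsUsage() *Command {
+	return &Command{
+		Use:  "greet [name]",
+		Args: ExactArgs(1),
+		RunE: func(cmd *Command, args []string) error {
+			cmd.Printf("hello, %s\n", args[0])
+			return nil
+		},
+	}
+}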
diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go
new file mode 100644
index 000000000..733f4d121
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completions.go
@@ -0,0 +1,685 @@
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+
+ "github.com/spf13/pflag"
+)
+
+// Annotations for Bash completion.
+const (
+ BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions"
+ BashCompCustom = "cobra_annotation_bash_completion_custom"
+ BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag"
+ BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir"
+)
+
+func writePreamble(buf io.StringWriter, name string) {
+ WriteStringAndCheck(buf, fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name))
+ WriteStringAndCheck(buf, fmt.Sprintf(`
+__%[1]s_debug()
+{
+ if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then
+ echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
+ fi
+}
+
+# Homebrew on Macs have version 1.3 of bash-completion which doesn't include
+# _init_completion. This is a very minimal version of that function.
+__%[1]s_init_completion()
+{
+ COMPREPLY=()
+ _get_comp_words_by_ref "$@" cur prev words cword
+}
+
+__%[1]s_index_of_word()
+{
+ local w word=$1
+ shift
+ index=0
+ for w in "$@"; do
+ [[ $w = "$word" ]] && return
+ index=$((index+1))
+ done
+ index=-1
+}
+
+__%[1]s_contains_word()
+{
+ local w word=$1; shift
+ for w in "$@"; do
+ [[ $w = "$word" ]] && return
+ done
+ return 1
+}
+
+__%[1]s_handle_go_custom_completion()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}"
+
+ local shellCompDirectiveError=%[3]d
+ local shellCompDirectiveNoSpace=%[4]d
+ local shellCompDirectiveNoFileComp=%[5]d
+ local shellCompDirectiveFilterFileExt=%[6]d
+ local shellCompDirectiveFilterDirs=%[7]d
+
+ local out requestComp lastParam lastChar comp directive args
+
+ # Prepare the command to request completions for the program.
+ # Calling ${words[0]} instead of directly %[1]s allows to handle aliases
+ args=("${words[@]:1}")
+ requestComp="${words[0]} %[2]s ${args[*]}"
+
+ lastParam=${words[$((${#words[@]}-1))]}
+ lastChar=${lastParam:$((${#lastParam}-1)):1}
+ __%[1]s_debug "${FUNCNAME[0]}: lastParam ${lastParam}, lastChar ${lastChar}"
+
+ if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then
+ # If the last parameter is complete (there is a space following it)
+ # We add an extra empty parameter so we can indicate this to the go method.
+ __%[1]s_debug "${FUNCNAME[0]}: Adding extra empty parameter"
+ requestComp="${requestComp} \"\""
+ fi
+
+ __%[1]s_debug "${FUNCNAME[0]}: calling ${requestComp}"
+ # Use eval to handle any environment variables and such
+ out=$(eval "${requestComp}" 2>/dev/null)
+
+ # Extract the directive integer at the very end of the output following a colon (:)
+ directive=${out##*:}
+ # Remove the directive
+ out=${out%%:*}
+ if [ "${directive}" = "${out}" ]; then
+ # There is not directive specified
+ directive=0
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: the completion directive is: ${directive}"
+ __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out[*]}"
+
+ if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
+ # Error code. No completion.
+ __%[1]s_debug "${FUNCNAME[0]}: received error from custom completion go code"
+ return
+ else
+ if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ __%[1]s_debug "${FUNCNAME[0]}: activating no space"
+ compopt -o nospace
+ fi
+ fi
+ if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ __%[1]s_debug "${FUNCNAME[0]}: activating no file completion"
+ compopt +o default
+ fi
+ fi
+ fi
+
+ if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
+ # File extension filtering
+ local fullFilter filter filteringCmd
+ # Do not use quotes around the $out variable or else newline
+ # characters will be kept.
+ for filter in ${out[*]}; do
+ fullFilter+="$filter|"
+ done
+
+ filteringCmd="_filedir $fullFilter"
+ __%[1]s_debug "File filtering command: $filteringCmd"
+ $filteringCmd
+ elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
+ # File completion for directories only
+        local subdir
+ # Use printf to strip any trailing newline
+ subdir=$(printf "%%s" "${out[0]}")
+ if [ -n "$subdir" ]; then
+ __%[1]s_debug "Listing directories in $subdir"
+ __%[1]s_handle_subdirs_in_dir_flag "$subdir"
+ else
+ __%[1]s_debug "Listing directories in ."
+ _filedir -d
+ fi
+ else
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${out[*]}" -- "$cur")
+ fi
+}
+
+__%[1]s_handle_reply()
+{
+ __%[1]s_debug "${FUNCNAME[0]}"
+ local comp
+ case $cur in
+ -*)
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ compopt -o nospace
+ fi
+ local allflags
+ if [ ${#must_have_one_flag[@]} -ne 0 ]; then
+ allflags=("${must_have_one_flag[@]}")
+ else
+ allflags=("${flags[*]} ${two_word_flags[*]}")
+ fi
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${allflags[*]}" -- "$cur")
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace
+ fi
+
+ # complete after --flag=abc
+ if [[ $cur == *=* ]]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ compopt +o nospace
+ fi
+
+ local index flag
+ flag="${cur%%=*}"
+ __%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}"
+ COMPREPLY=()
+ if [[ ${index} -ge 0 ]]; then
+ PREFIX=""
+ cur="${cur#*=}"
+ ${flags_completion[${index}]}
+ if [ -n "${ZSH_VERSION}" ]; then
+ # zsh completion needs --flag= prefix
+ eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )"
+ fi
+ fi
+ fi
+ return 0;
+ ;;
+ esac
+
+ # check if we are handling a flag with special work handling
+ local index
+ __%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}"
+ if [[ ${index} -ge 0 ]]; then
+ ${flags_completion[${index}]}
+ return
+ fi
+
+ # we are parsing a flag and don't have a special handler, no completion
+ if [[ ${cur} != "${words[cword]}" ]]; then
+ return
+ fi
+
+ local completions
+ completions=("${commands[@]}")
+ if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then
+ completions+=("${must_have_one_noun[@]}")
+ elif [[ -n "${has_completion_function}" ]]; then
+ # if a go completion function is provided, defer to that function
+ __%[1]s_handle_go_custom_completion
+ fi
+ if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then
+ completions+=("${must_have_one_flag[@]}")
+ fi
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${completions[*]}" -- "$cur")
+
+ if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${noun_aliases[*]}" -- "$cur")
+ fi
+
+ if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
+ if declare -F __%[1]s_custom_func >/dev/null; then
+ # try command name qualified custom func
+ __%[1]s_custom_func
+ else
+ # otherwise fall back to unqualified for compatibility
+ declare -F __custom_func >/dev/null && __custom_func
+ fi
+ fi
+
+ # available in bash-completion >= 2, not always present on macOS
+ if declare -F __ltrim_colon_completions >/dev/null; then
+ __ltrim_colon_completions "$cur"
+ fi
+
+ # If there is only 1 completion and it is a flag with an = it will be completed
+ # but we don't want a space after the =
+ if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then
+ compopt -o nospace
+ fi
+}
+
+# The arguments should be in the form "ext1|ext2|extn"
+__%[1]s_handle_filename_extension_flag()
+{
+ local ext="$1"
+ _filedir "@(${ext})"
+}
+
+__%[1]s_handle_subdirs_in_dir_flag()
+{
+ local dir="$1"
+ pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
+}
+
+__%[1]s_handle_flag()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ # if a command required a flag, and we found it, unset must_have_one_flag()
+ local flagname=${words[c]}
+ local flagvalue
+ # if the word contained an =
+ if [[ ${words[c]} == *"="* ]]; then
+ flagvalue=${flagname#*=} # take in as flagvalue after the =
+ flagname=${flagname%%=*} # strip everything after the =
+ flagname="${flagname}=" # but put the = back
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}"
+ if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then
+ must_have_one_flag=()
+ fi
+
+ # if you set a flag which only applies to this command, don't show subcommands
+ if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then
+ commands=()
+ fi
+
+ # keep flag value with flagname as flaghash
+ # flaghash variable is an associative array which is only supported in bash > 3.
+ if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
+ if [ -n "${flagvalue}" ] ; then
+ flaghash[${flagname}]=${flagvalue}
+ elif [ -n "${words[ $((c+1)) ]}" ] ; then
+ flaghash[${flagname}]=${words[ $((c+1)) ]}
+ else
+ flaghash[${flagname}]="true" # pad "true" for bool flag
+ fi
+ fi
+
+ # skip the argument to a two word flag
+ if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then
+ __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument"
+ c=$((c+1))
+ # if we are looking for a flags value, don't show commands
+ if [[ $c -eq $cword ]]; then
+ commands=()
+ fi
+ fi
+
+ c=$((c+1))
+
+}
+
+__%[1]s_handle_noun()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then
+ must_have_one_noun=()
+ elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then
+ must_have_one_noun=()
+ fi
+
+ nouns+=("${words[c]}")
+ c=$((c+1))
+}
+
+__%[1]s_handle_command()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ local next_command
+ if [[ -n ${last_command} ]]; then
+ next_command="_${last_command}_${words[c]//:/__}"
+ else
+ if [[ $c -eq 0 ]]; then
+ next_command="_%[1]s_root_command"
+ else
+ next_command="_${words[c]//:/__}"
+ fi
+ fi
+ c=$((c+1))
+ __%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}"
+ declare -F "$next_command" >/dev/null && $next_command
+}
+
+__%[1]s_handle_word()
+{
+ if [[ $c -ge $cword ]]; then
+ __%[1]s_handle_reply
+ return
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+ if [[ "${words[c]}" == -* ]]; then
+ __%[1]s_handle_flag
+ elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then
+ __%[1]s_handle_command
+ elif [[ $c -eq 0 ]]; then
+ __%[1]s_handle_command
+ elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then
+ # aliashash variable is an associative array which is only supported in bash > 3.
+ if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
+ words[c]=${aliashash[${words[c]}]}
+ __%[1]s_handle_command
+ else
+ __%[1]s_handle_noun
+ fi
+ else
+ __%[1]s_handle_noun
+ fi
+ __%[1]s_handle_word
+}
+
+`, name, ShellCompNoDescRequestCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+}
+
+func writePostscript(buf io.StringWriter, name string) {
+ name = strings.Replace(name, ":", "__", -1)
+ WriteStringAndCheck(buf, fmt.Sprintf("__start_%s()\n", name))
+ WriteStringAndCheck(buf, fmt.Sprintf(`{
+ local cur prev words cword split
+ declare -A flaghash 2>/dev/null || :
+ declare -A aliashash 2>/dev/null || :
+ if declare -F _init_completion >/dev/null 2>&1; then
+ _init_completion -s || return
+ else
+ __%[1]s_init_completion -n "=" || return
+ fi
+
+ local c=0
+ local flags=()
+ local two_word_flags=()
+ local local_nonpersistent_flags=()
+ local flags_with_completion=()
+ local flags_completion=()
+ local commands=("%[1]s")
+ local command_aliases=()
+ local must_have_one_flag=()
+ local must_have_one_noun=()
+ local has_completion_function
+ local last_command
+ local nouns=()
+ local noun_aliases=()
+
+ __%[1]s_handle_word
+}
+
+`, name))
+ WriteStringAndCheck(buf, fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then
+ complete -o default -F __start_%s %s
+else
+ complete -o default -o nospace -F __start_%s %s
+fi
+
+`, name, name, name, name))
+ WriteStringAndCheck(buf, "# ex: ts=4 sw=4 et filetype=sh\n")
+}
+
+func writeCommands(buf io.StringWriter, cmd *Command) {
+ WriteStringAndCheck(buf, " commands=()\n")
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() && c != cmd.helpCommand {
+ continue
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(" commands+=(%q)\n", c.Name()))
+ writeCmdAliases(buf, c)
+ }
+ WriteStringAndCheck(buf, "\n")
+}
+
+func writeFlagHandler(buf io.StringWriter, name string, annotations map[string][]string, cmd *Command) {
+ for key, value := range annotations {
+ switch key {
+ case BashCompFilenameExt:
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+
+ var ext string
+ if len(value) > 0 {
+ ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|")
+ } else {
+ ext = "_filedir"
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext))
+ case BashCompCustom:
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+
+ if len(value) > 0 {
+ handlers := strings.Join(value, "; ")
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", handlers))
+ } else {
+ WriteStringAndCheck(buf, " flags_completion+=(:)\n")
+ }
+ case BashCompSubdirsInDir:
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+
+ var ext string
+ if len(value) == 1 {
+ ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0]
+ } else {
+ ext = "_filedir -d"
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext))
+ }
+ }
+}
+
+const cbn = "\")\n"
+
+func writeShortFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) {
+ name := flag.Shorthand
+ format := " "
+ if len(flag.NoOptDefVal) == 0 {
+ format += "two_word_"
+ }
+ format += "flags+=(\"-%s" + cbn
+ WriteStringAndCheck(buf, fmt.Sprintf(format, name))
+ writeFlagHandler(buf, "-"+name, flag.Annotations, cmd)
+}
+
+func writeFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) {
+ name := flag.Name
+ format := " flags+=(\"--%s"
+ if len(flag.NoOptDefVal) == 0 {
+ format += "="
+ }
+ format += cbn
+ WriteStringAndCheck(buf, fmt.Sprintf(format, name))
+ if len(flag.NoOptDefVal) == 0 {
+ format = " two_word_flags+=(\"--%s" + cbn
+ WriteStringAndCheck(buf, fmt.Sprintf(format, name))
+ }
+ writeFlagHandler(buf, "--"+name, flag.Annotations, cmd)
+}
+
+func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) {
+ name := flag.Name
+ format := " local_nonpersistent_flags+=(\"--%[1]s" + cbn
+ if len(flag.NoOptDefVal) == 0 {
+ format += " local_nonpersistent_flags+=(\"--%[1]s=" + cbn
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(format, name))
+ if len(flag.Shorthand) > 0 {
+ WriteStringAndCheck(buf, fmt.Sprintf(" local_nonpersistent_flags+=(\"-%s\")\n", flag.Shorthand))
+ }
+}
+
+// Setup annotations for go completions for registered flags
+func prepareCustomAnnotationsForFlags(cmd *Command) {
+ flagCompletionMutex.RLock()
+ defer flagCompletionMutex.RUnlock()
+ for flag := range flagCompletionFunctions {
+ // Make sure the completion script calls the __*_go_custom_completion function for
+ // every registered flag. We need to do this here (and not when the flag was registered
+ // for completion) so that we can know the root command name for the prefix
+ // of ___go_custom_completion
+ if flag.Annotations == nil {
+ flag.Annotations = map[string][]string{}
+ }
+ flag.Annotations[BashCompCustom] = []string{fmt.Sprintf("__%[1]s_handle_go_custom_completion", cmd.Root().Name())}
+ }
+}
+
+func writeFlags(buf io.StringWriter, cmd *Command) {
+ prepareCustomAnnotationsForFlags(cmd)
+ WriteStringAndCheck(buf, ` flags=()
+ two_word_flags=()
+ local_nonpersistent_flags=()
+ flags_with_completion=()
+ flags_completion=()
+
+`)
+ localNonPersistentFlags := cmd.LocalNonPersistentFlags()
+ cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ writeFlag(buf, flag, cmd)
+ if len(flag.Shorthand) > 0 {
+ writeShortFlag(buf, flag, cmd)
+ }
+ // localNonPersistentFlags are used to stop the completion of subcommands when one is set
+ // if TraverseChildren is true we should allow to complete subcommands
+ if localNonPersistentFlags.Lookup(flag.Name) != nil && !cmd.Root().TraverseChildren {
+ writeLocalNonPersistentFlag(buf, flag)
+ }
+ })
+ cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ writeFlag(buf, flag, cmd)
+ if len(flag.Shorthand) > 0 {
+ writeShortFlag(buf, flag, cmd)
+ }
+ })
+
+ WriteStringAndCheck(buf, "\n")
+}
+
+func writeRequiredFlag(buf io.StringWriter, cmd *Command) {
+ WriteStringAndCheck(buf, " must_have_one_flag=()\n")
+ flags := cmd.NonInheritedFlags()
+ flags.VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ for key := range flag.Annotations {
+ switch key {
+ case BashCompOneRequiredFlag:
+ format := " must_have_one_flag+=(\"--%s"
+ if flag.Value.Type() != "bool" {
+ format += "="
+ }
+ format += cbn
+ WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name))
+
+ if len(flag.Shorthand) > 0 {
+ WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand))
+ }
+ }
+ }
+ })
+}
+
+func writeRequiredNouns(buf io.StringWriter, cmd *Command) {
+ WriteStringAndCheck(buf, " must_have_one_noun=()\n")
+ sort.Strings(cmd.ValidArgs)
+ for _, value := range cmd.ValidArgs {
+ // Remove any description that may be included following a tab character.
+ // Descriptions are not supported by bash completion.
+ value = strings.Split(value, "\t")[0]
+ WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value))
+ }
+ if cmd.ValidArgsFunction != nil {
+ WriteStringAndCheck(buf, " has_completion_function=1\n")
+ }
+}
+
+func writeCmdAliases(buf io.StringWriter, cmd *Command) {
+ if len(cmd.Aliases) == 0 {
+ return
+ }
+
+ sort.Strings(cmd.Aliases)
+
+ WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n"))
+ for _, value := range cmd.Aliases {
+ WriteStringAndCheck(buf, fmt.Sprintf(" command_aliases+=(%q)\n", value))
+ WriteStringAndCheck(buf, fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name()))
+ }
+ WriteStringAndCheck(buf, ` fi`)
+ WriteStringAndCheck(buf, "\n")
+}
+func writeArgAliases(buf io.StringWriter, cmd *Command) {
+ WriteStringAndCheck(buf, " noun_aliases=()\n")
+ sort.Strings(cmd.ArgAliases)
+ for _, value := range cmd.ArgAliases {
+ WriteStringAndCheck(buf, fmt.Sprintf(" noun_aliases+=(%q)\n", value))
+ }
+}
+
+func gen(buf io.StringWriter, cmd *Command) {
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() && c != cmd.helpCommand {
+ continue
+ }
+ gen(buf, c)
+ }
+ commandName := cmd.CommandPath()
+ commandName = strings.Replace(commandName, " ", "_", -1)
+ commandName = strings.Replace(commandName, ":", "__", -1)
+
+ if cmd.Root() == cmd {
+ WriteStringAndCheck(buf, fmt.Sprintf("_%s_root_command()\n{\n", commandName))
+ } else {
+ WriteStringAndCheck(buf, fmt.Sprintf("_%s()\n{\n", commandName))
+ }
+
+ WriteStringAndCheck(buf, fmt.Sprintf(" last_command=%q\n", commandName))
+ WriteStringAndCheck(buf, "\n")
+ WriteStringAndCheck(buf, " command_aliases=()\n")
+ WriteStringAndCheck(buf, "\n")
+
+ writeCommands(buf, cmd)
+ writeFlags(buf, cmd)
+ writeRequiredFlag(buf, cmd)
+ writeRequiredNouns(buf, cmd)
+ writeArgAliases(buf, cmd)
+ WriteStringAndCheck(buf, "}\n\n")
+}
+
+// GenBashCompletion generates a bash completion file and writes it to the passed writer.
+func (c *Command) GenBashCompletion(w io.Writer) error {
+ buf := new(bytes.Buffer)
+ writePreamble(buf, c.Name())
+ if len(c.BashCompletionFunction) > 0 {
+ buf.WriteString(c.BashCompletionFunction + "\n")
+ }
+ gen(buf, c)
+ writePostscript(buf, c.Name())
+
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func nonCompletableFlag(flag *pflag.Flag) bool {
+ return flag.Hidden || len(flag.Deprecated) > 0
+}
+
+// GenBashCompletionFile generates a bash completion file and writes it to the named file.
+func (c *Command) GenBashCompletionFile(filename string) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenBashCompletion(outFile)
+}
diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md
new file mode 100644
index 000000000..52919b2fa
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completions.md
@@ -0,0 +1,93 @@
+# Generating Bash Completions For Your cobra.Command
+
+Please refer to [Shell Completions](shell_completions.md) for details.
+
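+As a minimal sketch, assuming `rootCmd` is your root `*cobra.Command`, the
+completion script can be written out with `GenBashCompletionFile` (the output
+path here is just an example):
+
+```go
+// Generate the bash completion script for rootCmd and write it to a file.
+if err := rootCmd.GenBashCompletionFile("app_completion.bash"); err != nil {
+	log.Fatal(err)
+}
+```
+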
+## Bash legacy dynamic completions
+
+For backward compatibility, Cobra still supports its legacy dynamic completion solution (described below). Unlike the `ValidArgsFunction` solution, the legacy solution will only work for Bash shell-completion and not for other shells. This legacy solution can be used along-side `ValidArgsFunction` and `RegisterFlagCompletionFunc()`, as long as both solutions are not used for the same command. This provides a path to gradually migrate from the legacy solution to the new solution.
+
+**Note**: Cobra's default `completion` command uses bash completion V2. If you are currently using Cobra's legacy dynamic completion solution, you should not use the default `completion` command but continue using your own.
+
+The legacy solution allows you to inject bash functions into the bash completion script. Those bash functions are responsible for providing the completion choices for your own completions.
+
+Some code that works in kubernetes:
+
+```bash
+const (
+ bash_completion_func = `__kubectl_parse_get()
+{
+ local kubectl_output out
+ if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then
+ out=($(echo "${kubectl_output}" | awk '{print $1}'))
+ COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) )
+ fi
+}
+
+__kubectl_get_resource()
+{
+ if [[ ${#nouns[@]} -eq 0 ]]; then
+ return 1
+ fi
+ __kubectl_parse_get ${nouns[${#nouns[@]} -1]}
+ if [[ $? -eq 0 ]]; then
+ return 0
+ fi
+}
+
+__kubectl_custom_func() {
+ case ${last_command} in
+ kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop)
+ __kubectl_get_resource
+ return
+ ;;
+ *)
+ ;;
+ esac
+}
+`)
+```
+
+And then I set that in my command definition:
+
+```go
+cmds := &cobra.Command{
+ Use: "kubectl",
+ Short: "kubectl controls the Kubernetes cluster manager",
+ Long: `kubectl controls the Kubernetes cluster manager.
+
+Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`,
+ Run: runHelp,
+ BashCompletionFunction: bash_completion_func,
+}
+```
+
+The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__kubectl_custom_func()` (`__custom_func()`) to be called when the built-in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__kubectl_custom_func()` will run because the cobra.Command only understood "kubectl" and "get." `__kubectl_custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods!
+
+Similarly, for flags:
+
+```go
+ annotation := make(map[string][]string)
+ annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"}
+
+ flag := &pflag.Flag{
+ Name: "namespace",
+ Usage: usage,
+ Annotations: annotation,
+ }
+ cmd.Flags().AddFlag(flag)
+```
+
+In addition add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction`
+value, e.g.:
+
+```bash
+__kubectl_get_namespaces()
+{
+ local template
+ template="{{ range .items }}{{ .metadata.name }} {{ end }}"
+ local kubectl_out
+ if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then
+        COMPREPLY=( $( compgen -W "${kubectl_out[*]}" -- "$cur" ) )
+ fi
+}
+```
diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go
new file mode 100644
index 000000000..8859b57c4
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go
@@ -0,0 +1,302 @@
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+func (c *Command) genBashCompletion(w io.Writer, includeDesc bool) error {
+ buf := new(bytes.Buffer)
+ genBashComp(buf, c.Name(), includeDesc)
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func genBashComp(buf io.StringWriter, name string, includeDesc bool) {
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
+ }
+
+ WriteStringAndCheck(buf, fmt.Sprintf(`# bash completion V2 for %-36[1]s -*- shell-script -*-
+
+__%[1]s_debug()
+{
+ if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then
+ echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
+ fi
+}
+
+# Macs have bash3 for which the bash-completion package doesn't include
+# _init_completion. This is a minimal version of that function.
+__%[1]s_init_completion()
+{
+ COMPREPLY=()
+ _get_comp_words_by_ref "$@" cur prev words cword
+}
+
+# This function calls the %[1]s program to obtain the completion
+# results and the directive. It fills the 'out' and 'directive' vars.
+__%[1]s_get_completion_results() {
+ local requestComp lastParam lastChar args
+
+ # Prepare the command to request completions for the program.
+ # Calling ${words[0]} instead of directly %[1]s allows to handle aliases
+ args=("${words[@]:1}")
+ requestComp="${words[0]} %[2]s ${args[*]}"
+
+ lastParam=${words[$((${#words[@]}-1))]}
+ lastChar=${lastParam:$((${#lastParam}-1)):1}
+ __%[1]s_debug "lastParam ${lastParam}, lastChar ${lastChar}"
+
+ if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then
+ # If the last parameter is complete (there is a space following it)
+ # We add an extra empty parameter so we can indicate this to the go method.
+ __%[1]s_debug "Adding extra empty parameter"
+ requestComp="${requestComp} ''"
+ fi
+
+ # When completing a flag with an = (e.g., %[1]s -n=)
+ # bash focuses on the part after the =, so we need to remove
+ # the flag part from $cur
+ if [[ "${cur}" == -*=* ]]; then
+ cur="${cur#*=}"
+ fi
+
+ __%[1]s_debug "Calling ${requestComp}"
+ # Use eval to handle any environment variables and such
+ out=$(eval "${requestComp}" 2>/dev/null)
+
+ # Extract the directive integer at the very end of the output following a colon (:)
+ directive=${out##*:}
+ # Remove the directive
+ out=${out%%:*}
+ if [ "${directive}" = "${out}" ]; then
+ # There is not directive specified
+ directive=0
+ fi
+ __%[1]s_debug "The completion directive is: ${directive}"
+ __%[1]s_debug "The completions are: ${out[*]}"
+}
+
+__%[1]s_process_completion_results() {
+ local shellCompDirectiveError=%[3]d
+ local shellCompDirectiveNoSpace=%[4]d
+ local shellCompDirectiveNoFileComp=%[5]d
+ local shellCompDirectiveFilterFileExt=%[6]d
+ local shellCompDirectiveFilterDirs=%[7]d
+
+ if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
+ # Error code. No completion.
+ __%[1]s_debug "Received error from custom completion go code"
+ return
+ else
+ if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ __%[1]s_debug "Activating no space"
+ compopt -o nospace
+ else
+ __%[1]s_debug "No space directive not supported in this version of bash"
+ fi
+ fi
+ if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ __%[1]s_debug "Activating no file completion"
+ compopt +o default
+ else
+ __%[1]s_debug "No file completion directive not supported in this version of bash"
+ fi
+ fi
+ fi
+
+ if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
+ # File extension filtering
+ local fullFilter filter filteringCmd
+
+ # Do not use quotes around the $out variable or else newline
+ # characters will be kept.
+ for filter in ${out[*]}; do
+ fullFilter+="$filter|"
+ done
+
+ filteringCmd="_filedir $fullFilter"
+ __%[1]s_debug "File filtering command: $filteringCmd"
+ $filteringCmd
+ elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
+ # File completion for directories only
+
+ # Use printf to strip any trailing newline
+ local subdir
+ subdir=$(printf "%%s" "${out[0]}")
+ if [ -n "$subdir" ]; then
+ __%[1]s_debug "Listing directories in $subdir"
+ pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
+ else
+ __%[1]s_debug "Listing directories in ."
+ _filedir -d
+ fi
+ else
+ __%[1]s_handle_standard_completion_case
+ fi
+
+ __%[1]s_handle_special_char "$cur" :
+ __%[1]s_handle_special_char "$cur" =
+}
+
+__%[1]s_handle_standard_completion_case() {
+ local tab comp
+ tab=$(printf '\t')
+
+ local longest=0
+ # Look for the longest completion so that we can format things nicely
+ while IFS='' read -r comp; do
+ # Strip any description before checking the length
+ comp=${comp%%%%$tab*}
+ # Only consider the completions that match
+ comp=$(compgen -W "$comp" -- "$cur")
+ if ((${#comp}>longest)); then
+ longest=${#comp}
+ fi
+ done < <(printf "%%s\n" "${out[@]}")
+
+ local completions=()
+ while IFS='' read -r comp; do
+ if [ -z "$comp" ]; then
+ continue
+ fi
+
+ __%[1]s_debug "Original comp: $comp"
+ comp="$(__%[1]s_format_comp_descriptions "$comp" "$longest")"
+ __%[1]s_debug "Final comp: $comp"
+ completions+=("$comp")
+ done < <(printf "%%s\n" "${out[@]}")
+
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${completions[*]}" -- "$cur")
+
+ # If there is a single completion left, remove the description text
+ if [ ${#COMPREPLY[*]} -eq 1 ]; then
+ __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}"
+ comp="${COMPREPLY[0]%%%% *}"
+ __%[1]s_debug "Removed description from single completion, which is now: ${comp}"
+ COMPREPLY=()
+ COMPREPLY+=("$comp")
+ fi
+}
+
+__%[1]s_handle_special_char()
+{
+ local comp="$1"
+ local char=$2
+ if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then
+ local word=${comp%%"${comp##*${char}}"}
+ local idx=${#COMPREPLY[*]}
+ while [[ $((--idx)) -ge 0 ]]; do
+ COMPREPLY[$idx]=${COMPREPLY[$idx]#"$word"}
+ done
+ fi
+}
+
+__%[1]s_format_comp_descriptions()
+{
+ local tab
+ tab=$(printf '\t')
+ local comp="$1"
+ local longest=$2
+
+ # Properly format the description string which follows a tab character if there is one
+ if [[ "$comp" == *$tab* ]]; then
+ desc=${comp#*$tab}
+ comp=${comp%%%%$tab*}
+
+ # $COLUMNS stores the current shell width.
+ # Remove an extra 4 because we add 2 spaces and 2 parentheses.
+ maxdesclength=$(( COLUMNS - longest - 4 ))
+
+ # Make sure we can fit a description of at least 8 characters
+ # if we are to align the descriptions.
+ if [[ $maxdesclength -gt 8 ]]; then
+ # Add the proper number of spaces to align the descriptions
+ for ((i = ${#comp} ; i < longest ; i++)); do
+ comp+=" "
+ done
+ else
+ # Don't pad the descriptions so we can fit more text after the completion
+ maxdesclength=$(( COLUMNS - ${#comp} - 4 ))
+ fi
+
+ # If there is enough space for any description text,
+ # truncate the descriptions that are too long for the shell width
+ if [ $maxdesclength -gt 0 ]; then
+ if [ ${#desc} -gt $maxdesclength ]; then
+ desc=${desc:0:$(( maxdesclength - 1 ))}
+ desc+="…"
+ fi
+ comp+=" ($desc)"
+ fi
+ fi
+
+ # Must use printf to escape all special characters
+ printf "%%q" "${comp}"
+}
+
+__start_%[1]s()
+{
+ local cur prev words cword split
+
+ COMPREPLY=()
+
+ # Call _init_completion from the bash-completion package
+ # to prepare the arguments properly
+ if declare -F _init_completion >/dev/null 2>&1; then
+ _init_completion -n "=:" || return
+ else
+ __%[1]s_init_completion -n "=:" || return
+ fi
+
+ __%[1]s_debug
+ __%[1]s_debug "========= starting completion logic =========="
+ __%[1]s_debug "cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}, cword is $cword"
+
+ # The user could have moved the cursor backwards on the command-line.
+ # We need to trigger completion from the $cword location, so we need
+ # to truncate the command-line ($words) up to the $cword location.
+ words=("${words[@]:0:$cword+1}")
+ __%[1]s_debug "Truncated words[*]: ${words[*]},"
+
+ local out directive
+ __%[1]s_get_completion_results
+ __%[1]s_process_completion_results
+}
+
+if [[ $(type -t compopt) = "builtin" ]]; then
+ complete -o default -F __start_%[1]s %[1]s
+else
+ complete -o default -o nospace -F __start_%[1]s %[1]s
+fi
+
+# ex: ts=4 sw=4 et filetype=sh
+`, name, compCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+}
+
+// GenBashCompletionFileV2 generates Bash completion version 2.
+func (c *Command) GenBashCompletionFileV2(filename string, includeDesc bool) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenBashCompletionV2(outFile, includeDesc)
+}
+
+// GenBashCompletionV2 generates Bash completion file version 2
+// and writes it to the passed writer.
+func (c *Command) GenBashCompletionV2(w io.Writer, includeDesc bool) error {
+ return c.genBashCompletion(w, includeDesc)
+}
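+
+// exampleGenBashCompletionV2 is an illustrative sketch added during review and
+// is not part of upstream cobra: it shows how an application's root command
+// might emit the V2 bash completion script, either to a file or to an
+// arbitrary writer such as os.Stdout (the file path below is hypothetical).
+func exampleGenBashCompletionV2(root *Command) error {
+ // Write completions (with descriptions) for the root command to a file.
+ if err := root.GenBashCompletionFileV2("/tmp/example-completion.bash", true); err != nil {
+ return err
+ }
+ // Stream the same script, without descriptions, to stdout instead.
+ return root.GenBashCompletionV2(os.Stdout, false)
+}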
diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go
new file mode 100644
index 000000000..d6cbfd719
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra.go
@@ -0,0 +1,222 @@
+// Copyright © 2013 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Commands similar to git, the go tool, and other modern CLI tools,
+// inspired by go, go-Commander, gh, and subcommand.
+
+package cobra
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "strconv"
+ "strings"
+ "text/template"
+ "time"
+ "unicode"
+)
+
+var templateFuncs = template.FuncMap{
+ "trim": strings.TrimSpace,
+ "trimRightSpace": trimRightSpace,
+ "trimTrailingWhitespaces": trimRightSpace,
+ "appendIfNotPresent": appendIfNotPresent,
+ "rpad": rpad,
+ "gt": Gt,
+ "eq": Eq,
+}
+
+var initializers []func()
+
+// EnablePrefixMatching allows setting automatic prefix matching. Automatic prefix matching can be a dangerous thing
+// to enable automatically in CLI tools.
+// Set this to true to enable it.
+var EnablePrefixMatching = false
+
+// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default.
+// To disable sorting, set it to false.
+var EnableCommandSorting = true
+
+// MousetrapHelpText enables an information splash screen on Windows
+// if the CLI is started from explorer.exe.
+// To disable the mousetrap, just set this variable to blank string ("").
+// Works only on Microsoft Windows.
+var MousetrapHelpText = `This is a command line tool.
+
+You need to open cmd.exe and run it from there.
+`
+
+// MousetrapDisplayDuration controls how long the MousetrapHelpText message is displayed on Windows
+// if the CLI is started from explorer.exe. Set to 0 to wait for the return key to be pressed.
+// To disable the mousetrap, just set MousetrapHelpText to blank string ("").
+// Works only on Microsoft Windows.
+var MousetrapDisplayDuration = 5 * time.Second
+
+// AddTemplateFunc adds a template function that's available to Usage and Help
+// template generation.
+func AddTemplateFunc(name string, tmplFunc interface{}) {
+ templateFuncs[name] = tmplFunc
+}
+
+// AddTemplateFuncs adds multiple template functions that are available to Usage and
+// Help template generation.
+func AddTemplateFuncs(tmplFuncs template.FuncMap) {
+ for k, v := range tmplFuncs {
+ templateFuncs[k] = v
+ }
+}
+
+// OnInitialize sets the passed functions to be run when each command's
+// Execute method is called.
+func OnInitialize(y ...func()) {
+ initializers = append(initializers, y...)
+}
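+
+// exampleOnInitialize is an illustrative sketch added during review and is not
+// part of upstream cobra: it registers a setup function that cobra runs when
+// any command's Execute method is called, before the command's own Run hooks.
+// Applications typically load configuration here.
+func exampleOnInitialize() {
+ OnInitialize(func() {
+ fmt.Println("loading configuration before any command runs")
+ })
+}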
+
+// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
+// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as
+// ints and then compared.
+func Gt(a interface{}, b interface{}) bool {
+ var left, right int64
+ av := reflect.ValueOf(a)
+
+ switch av.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ left = int64(av.Len())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ left = av.Int()
+ case reflect.String:
+ left, _ = strconv.ParseInt(av.String(), 10, 64)
+ }
+
+ bv := reflect.ValueOf(b)
+
+ switch bv.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ right = int64(bv.Len())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ right = bv.Int()
+ case reflect.String:
+ right, _ = strconv.ParseInt(bv.String(), 10, 64)
+ }
+
+ return left > right
+}
+
+// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic.
+func Eq(a interface{}, b interface{}) bool {
+ av := reflect.ValueOf(a)
+ bv := reflect.ValueOf(b)
+
+ switch av.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ panic("Eq called on unsupported type")
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return av.Int() == bv.Int()
+ case reflect.String:
+ return av.String() == bv.String()
+ }
+ return false
+}
+
+func trimRightSpace(s string) string {
+ return strings.TrimRightFunc(s, unicode.IsSpace)
+}
+
+// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s.
+func appendIfNotPresent(s, stringToAppend string) string {
+ if strings.Contains(s, stringToAppend) {
+ return s
+ }
+ return s + " " + stringToAppend
+}
+
+// rpad adds padding to the right of a string.
+func rpad(s string, padding int) string {
+ template := fmt.Sprintf("%%-%ds", padding)
+ return fmt.Sprintf(template, s)
+}
+
+// tmpl executes the given template text on data, writing the result to w.
+func tmpl(w io.Writer, text string, data interface{}) error {
+ t := template.New("top")
+ t.Funcs(templateFuncs)
+ template.Must(t.Parse(text))
+ return t.Execute(w, data)
+}
+
+// ld compares two strings and returns the levenshtein distance between them.
+func ld(s, t string, ignoreCase bool) int {
+ if ignoreCase {
+ s = strings.ToLower(s)
+ t = strings.ToLower(t)
+ }
+ d := make([][]int, len(s)+1)
+ for i := range d {
+ d[i] = make([]int, len(t)+1)
+ }
+ for i := range d {
+ d[i][0] = i
+ }
+ for j := range d[0] {
+ d[0][j] = j
+ }
+ for j := 1; j <= len(t); j++ {
+ for i := 1; i <= len(s); i++ {
+ if s[i-1] == t[j-1] {
+ d[i][j] = d[i-1][j-1]
+ } else {
+ min := d[i-1][j]
+ if d[i][j-1] < min {
+ min = d[i][j-1]
+ }
+ if d[i-1][j-1] < min {
+ min = d[i-1][j-1]
+ }
+ d[i][j] = min + 1
+ }
+ }
+
+ }
+ return d[len(s)][len(t)]
+}
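+
+// exampleLevenshtein is an illustrative sketch added during review and is not
+// part of upstream cobra: it shows how the ld helper scores the edit distance
+// that drives the "Did you mean this?" suggestions for mistyped command names.
+func exampleLevenshtein() {
+ // "serve" and "server" differ by a single insertion, so the distance is 1,
+ // which is within the default SuggestionsMinimumDistance of 2.
+ fmt.Println(ld("serve", "server", true))
+}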
+
+func stringInSlice(a string, list []string) bool {
+ for _, b := range list {
+ if b == a {
+ return true
+ }
+ }
+ return false
+}
+
+// CheckErr prints the msg with the prefix 'Error:' and exits with error code 1. If the msg is nil, it does nothing.
+func CheckErr(msg interface{}) {
+ if msg != nil {
+ fmt.Fprintln(os.Stderr, "Error:", msg)
+ os.Exit(1)
+ }
+}
+
+// WriteStringAndCheck writes a string into a buffer, and checks if the error is not nil.
+func WriteStringAndCheck(b io.StringWriter, s string) {
+ _, err := b.WriteString(s)
+ CheckErr(err)
+}
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
new file mode 100644
index 000000000..2cc18891d
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -0,0 +1,1680 @@
+// Copyright © 2013 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces.
+// In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code.
+package cobra
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ flag "github.com/spf13/pflag"
+)
+
+// FParseErrWhitelist configures Flag parse errors to be ignored
+type FParseErrWhitelist flag.ParseErrorsWhitelist
+
+// Command is just that, a command for your application.
+// E.g. 'go run ...' - 'run' is the command. Cobra requires
+// you to define the usage and description as part of your command
+// definition to ensure usability.
+type Command struct {
+ // Use is the one-line usage message.
+ // Recommended syntax is as follows:
+ // [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required.
+ // ... indicates that you can specify multiple values for the previous argument.
+ // | indicates mutually exclusive information. You can use the argument to the left of the separator or the
+ // argument to the right of the separator. You cannot use both arguments in a single use of the command.
+ // { } delimits a set of mutually exclusive arguments when one of the arguments is required. If the arguments are
+ // optional, they are enclosed in brackets ([ ]).
+ // Example: add [-F file | -D dir]... [-f format] profile
+ Use string
+
+ // Aliases is an array of aliases that can be used instead of the first word in Use.
+ Aliases []string
+
+ // SuggestFor is an array of command names for which this command will be suggested -
+ // similar to aliases but only suggests.
+ SuggestFor []string
+
+ // Short is the short description shown in the 'help' output.
+ Short string
+
+ // Long is the long message shown in the 'help <this-command>' output.
+ Long string
+
+ // Example is examples of how to use the command.
+ Example string
+
+ // ValidArgs is a list of all valid non-flag arguments that are accepted in shell completions.
+ ValidArgs []string
+ // ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion.
+ // It is a dynamic version of using ValidArgs.
+ // Only one of ValidArgs and ValidArgsFunction can be used for a command.
+ ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)
+
+ // Expected arguments
+ Args PositionalArgs
+
+ // ArgAliases is a list of aliases for ValidArgs.
+ // These are not suggested to the user in the shell completion,
+ // but accepted if entered manually.
+ ArgAliases []string
+
+ // BashCompletionFunction holds custom bash functions used by the legacy bash autocompletion generator.
+ // For portability with other shells, it is recommended to use ValidArgsFunction instead.
+ BashCompletionFunction string
+
+ // Deprecated defines whether this command is deprecated; if so, this string is printed when the command is used.
+ Deprecated string
+
+ // Annotations are key/value pairs that can be used by applications to identify or
+ // group commands.
+ Annotations map[string]string
+
+ // Version defines the version for this command. If this value is non-empty and the command does not
+ // define a "version" flag, a "version" boolean flag will be added to the command and, if specified,
+ // will print content of the "Version" variable. A shorthand "v" flag will also be added if the
+ // command does not define one.
+ Version string
+
+ // The *Run functions are executed in the following order:
+ // * PersistentPreRun()
+ // * PreRun()
+ // * Run()
+ // * PostRun()
+ // * PersistentPostRun()
+ // All functions get the same args, the arguments after the command name.
+ //
+ // PersistentPreRun: children of this command will inherit and execute.
+ PersistentPreRun func(cmd *Command, args []string)
+ // PersistentPreRunE: PersistentPreRun but returns an error.
+ PersistentPreRunE func(cmd *Command, args []string) error
+ // PreRun: children of this command will not inherit.
+ PreRun func(cmd *Command, args []string)
+ // PreRunE: PreRun but returns an error.
+ PreRunE func(cmd *Command, args []string) error
+ // Run: Typically the actual work function. Most commands will only implement this.
+ Run func(cmd *Command, args []string)
+ // RunE: Run but returns an error.
+ RunE func(cmd *Command, args []string) error
+ // PostRun: run after the Run command.
+ PostRun func(cmd *Command, args []string)
+ // PostRunE: PostRun but returns an error.
+ PostRunE func(cmd *Command, args []string) error
+ // PersistentPostRun: children of this command will inherit and execute after PostRun.
+ PersistentPostRun func(cmd *Command, args []string)
+ // PersistentPostRunE: PersistentPostRun but returns an error.
+ PersistentPostRunE func(cmd *Command, args []string) error
+
+ // args is actual args parsed from flags.
+ args []string
+ // flagErrorBuf contains all error messages from pflag.
+ flagErrorBuf *bytes.Buffer
+ // flags is full set of flags.
+ flags *flag.FlagSet
+ // pflags contains persistent flags.
+ pflags *flag.FlagSet
+ // lflags contains local flags.
+ lflags *flag.FlagSet
+ // iflags contains inherited flags.
+ iflags *flag.FlagSet
+ // parentsPflags is all persistent flags of cmd's parents.
+ parentsPflags *flag.FlagSet
+ // globNormFunc is the global normalization function
+ // that we can use on every pflag set and children commands
+ globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName
+
+ // usageFunc is usage func defined by user.
+ usageFunc func(*Command) error
+ // usageTemplate is usage template defined by user.
+ usageTemplate string
+ // flagErrorFunc is func defined by user and it's called when the parsing of
+ // flags returns an error.
+ flagErrorFunc func(*Command, error) error
+ // helpTemplate is help template defined by user.
+ helpTemplate string
+ // helpFunc is help func defined by user.
+ helpFunc func(*Command, []string)
+ // helpCommand is command with usage 'help'. If it's not defined by user,
+ // cobra uses default help command.
+ helpCommand *Command
+ // versionTemplate is the version template defined by user.
+ versionTemplate string
+
+ // inReader is a reader defined by the user that replaces stdin
+ inReader io.Reader
+ // outWriter is a writer defined by the user that replaces stdout
+ outWriter io.Writer
+ // errWriter is a writer defined by the user that replaces stderr
+ errWriter io.Writer
+
+ // FParseErrWhitelist flag parse errors to be ignored
+ FParseErrWhitelist FParseErrWhitelist
+
+ // CompletionOptions is a set of options to control the handling of shell completion
+ CompletionOptions CompletionOptions
+
+ // commandsAreSorted defines whether the command slice is sorted.
+ commandsAreSorted bool
+ // commandCalledAs is the name or alias value used to call this command.
+ commandCalledAs struct {
+ name string
+ called bool
+ }
+
+ ctx context.Context
+
+ // commands is the list of commands supported by this program.
+ commands []*Command
+ // parent is a parent command for this command.
+ parent *Command
+ // Max lengths of commands' string lengths for use in padding.
+ commandsMaxUseLen int
+ commandsMaxCommandPathLen int
+ commandsMaxNameLen int
+
+ // TraverseChildren parses flags on all parents before executing child command.
+ TraverseChildren bool
+
+ // Hidden defines whether this command is hidden and should NOT show up in the list of available commands.
+ Hidden bool
+
+ // SilenceErrors is an option to quiet errors downstream.
+ SilenceErrors bool
+
+ // SilenceUsage is an option to silence usage when an error occurs.
+ SilenceUsage bool
+
+ // DisableFlagParsing disables the flag parsing.
+ // If this is true all flags will be passed to the command as arguments.
+ DisableFlagParsing bool
+
+ // DisableAutoGenTag defines whether the gen tag ("Auto generated by spf13/cobra...")
+ // will be printed when generating docs for this command.
+ DisableAutoGenTag bool
+
+ // DisableFlagsInUseLine will disable the addition of [flags] to the usage
+ // line of a command when printing help or generating docs
+ DisableFlagsInUseLine bool
+
+ // DisableSuggestions disables the suggestions based on Levenshtein distance
+ // that go along with 'unknown command' messages.
+ DisableSuggestions bool
+
+ // SuggestionsMinimumDistance defines the minimum Levenshtein distance to display suggestions.
+ // Must be > 0.
+ SuggestionsMinimumDistance int
+}
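+
+// exampleRunOrder is an illustrative sketch added during review and is not
+// part of upstream cobra: it builds a command whose hooks print in the order
+// documented above (PersistentPreRun, PreRun, Run, PostRun, PersistentPostRun).
+func exampleRunOrder() *Command {
+ return &Command{
+ Use: "app",
+ PersistentPreRun: func(cmd *Command, args []string) { fmt.Println("1: persistent pre-run") },
+ PreRun: func(cmd *Command, args []string) { fmt.Println("2: pre-run") },
+ Run: func(cmd *Command, args []string) { fmt.Println("3: run") },
+ PostRun: func(cmd *Command, args []string) { fmt.Println("4: post-run") },
+ PersistentPostRun: func(cmd *Command, args []string) { fmt.Println("5: persistent post-run") },
+ }
+}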
+
+// Context returns the underlying command context. If the command wasn't
+// executed with ExecuteContext, Context returns the background context.
+func (c *Command) Context() context.Context {
+ return c.ctx
+}
+
+// SetArgs sets arguments for the command. By default the arguments are taken
+// from os.Args[1:]; overriding them is particularly useful when testing.
+func (c *Command) SetArgs(a []string) {
+ c.args = a
+}
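+
+// exampleSetArgsForTest is an illustrative sketch added during review and is
+// not part of upstream cobra: it shows the common testing pattern of
+// overriding os.Args with SetArgs and capturing output with SetOut/SetErr
+// before calling Execute. The argument values shown are hypothetical.
+func exampleSetArgsForTest(root *Command) (string, error) {
+ buf := new(bytes.Buffer)
+ root.SetOut(buf)
+ root.SetErr(buf)
+ root.SetArgs([]string{"server", "start", "--config-path", "./config.yaml"})
+ err := root.Execute()
+ return buf.String(), err
+}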
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+// Deprecated: Use SetOut and/or SetErr instead
+func (c *Command) SetOutput(output io.Writer) {
+ c.outWriter = output
+ c.errWriter = output
+}
+
+// SetOut sets the destination for usage messages.
+// If newOut is nil, os.Stdout is used.
+func (c *Command) SetOut(newOut io.Writer) {
+ c.outWriter = newOut
+}
+
+// SetErr sets the destination for error messages.
+// If newErr is nil, os.Stderr is used.
+func (c *Command) SetErr(newErr io.Writer) {
+ c.errWriter = newErr
+}
+
+// SetIn sets the source for input data.
+// If newIn is nil, os.Stdin is used.
+func (c *Command) SetIn(newIn io.Reader) {
+ c.inReader = newIn
+}
+
+// SetUsageFunc sets usage function. Usage can be defined by application.
+func (c *Command) SetUsageFunc(f func(*Command) error) {
+ c.usageFunc = f
+}
+
+// SetUsageTemplate sets usage template. Can be defined by Application.
+func (c *Command) SetUsageTemplate(s string) {
+ c.usageTemplate = s
+}
+
+// SetFlagErrorFunc sets a function to generate an error when flag parsing
+// fails.
+func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) {
+ c.flagErrorFunc = f
+}
+
+// SetHelpFunc sets help function. Can be defined by Application.
+func (c *Command) SetHelpFunc(f func(*Command, []string)) {
+ c.helpFunc = f
+}
+
+// SetHelpCommand sets help command.
+func (c *Command) SetHelpCommand(cmd *Command) {
+ c.helpCommand = cmd
+}
+
+// SetHelpTemplate sets help template to be used. Application can use it to set custom template.
+func (c *Command) SetHelpTemplate(s string) {
+ c.helpTemplate = s
+}
+
+// SetVersionTemplate sets version template to be used. Application can use it to set custom template.
+func (c *Command) SetVersionTemplate(s string) {
+ c.versionTemplate = s
+}
+
+// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands.
+// The user should not have a cyclic dependency on commands.
+func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) {
+ c.Flags().SetNormalizeFunc(n)
+ c.PersistentFlags().SetNormalizeFunc(n)
+ c.globNormFunc = n
+
+ for _, command := range c.commands {
+ command.SetGlobalNormalizationFunc(n)
+ }
+}
+
+// OutOrStdout returns output to stdout.
+func (c *Command) OutOrStdout() io.Writer {
+ return c.getOut(os.Stdout)
+}
+
+// OutOrStderr returns output to stderr
+func (c *Command) OutOrStderr() io.Writer {
+ return c.getOut(os.Stderr)
+}
+
+// ErrOrStderr returns output to stderr
+func (c *Command) ErrOrStderr() io.Writer {
+ return c.getErr(os.Stderr)
+}
+
+// InOrStdin returns input to stdin
+func (c *Command) InOrStdin() io.Reader {
+ return c.getIn(os.Stdin)
+}
+
+func (c *Command) getOut(def io.Writer) io.Writer {
+ if c.outWriter != nil {
+ return c.outWriter
+ }
+ if c.HasParent() {
+ return c.parent.getOut(def)
+ }
+ return def
+}
+
+func (c *Command) getErr(def io.Writer) io.Writer {
+ if c.errWriter != nil {
+ return c.errWriter
+ }
+ if c.HasParent() {
+ return c.parent.getErr(def)
+ }
+ return def
+}
+
+func (c *Command) getIn(def io.Reader) io.Reader {
+ if c.inReader != nil {
+ return c.inReader
+ }
+ if c.HasParent() {
+ return c.parent.getIn(def)
+ }
+ return def
+}
+
+// UsageFunc returns either the function set by SetUsageFunc for this command
+// or a parent, or it returns a default usage function.
+func (c *Command) UsageFunc() (f func(*Command) error) {
+ if c.usageFunc != nil {
+ return c.usageFunc
+ }
+ if c.HasParent() {
+ return c.Parent().UsageFunc()
+ }
+ return func(c *Command) error {
+ c.mergePersistentFlags()
+ err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c)
+ if err != nil {
+ c.PrintErrln(err)
+ }
+ return err
+ }
+}
+
+// Usage puts out the usage for the command.
+// Used when a user provides invalid input.
+// Can be defined by user by overriding UsageFunc.
+func (c *Command) Usage() error {
+ return c.UsageFunc()(c)
+}
+
+// HelpFunc returns either the function set by SetHelpFunc for this command
+// or a parent, or it returns a function with default help behavior.
+func (c *Command) HelpFunc() func(*Command, []string) {
+ if c.helpFunc != nil {
+ return c.helpFunc
+ }
+ if c.HasParent() {
+ return c.Parent().HelpFunc()
+ }
+ return func(c *Command, a []string) {
+ c.mergePersistentFlags()
+ // The help should be sent to stdout
+ // See https://github.com/spf13/cobra/issues/1002
+ err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c)
+ if err != nil {
+ c.PrintErrln(err)
+ }
+ }
+}
+
+// Help puts out the help for the command.
+// Used when a user calls help [command].
+// Can be defined by user by overriding HelpFunc.
+func (c *Command) Help() error {
+ c.HelpFunc()(c, []string{})
+ return nil
+}
+
+// UsageString returns usage string.
+func (c *Command) UsageString() string {
+ // Storing normal writers
+ tmpOutput := c.outWriter
+ tmpErr := c.errWriter
+
+ bb := new(bytes.Buffer)
+ c.outWriter = bb
+ c.errWriter = bb
+
+ CheckErr(c.Usage())
+
+ // Setting things back to normal
+ c.outWriter = tmpOutput
+ c.errWriter = tmpErr
+
+ return bb.String()
+}
+
+// FlagErrorFunc returns either the function set by SetFlagErrorFunc for this
+// command or a parent, or it returns a function which returns the original
+// error.
+func (c *Command) FlagErrorFunc() (f func(*Command, error) error) {
+ if c.flagErrorFunc != nil {
+ return c.flagErrorFunc
+ }
+
+ if c.HasParent() {
+ return c.parent.FlagErrorFunc()
+ }
+ return func(c *Command, err error) error {
+ return err
+ }
+}
+
+var minUsagePadding = 25
+
+// UsagePadding returns the padding for the usage.
+func (c *Command) UsagePadding() int {
+ if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen {
+ return minUsagePadding
+ }
+ return c.parent.commandsMaxUseLen
+}
+
+var minCommandPathPadding = 11
+
+// CommandPathPadding returns the padding for the command path.
+func (c *Command) CommandPathPadding() int {
+ if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen {
+ return minCommandPathPadding
+ }
+ return c.parent.commandsMaxCommandPathLen
+}
+
+var minNamePadding = 11
+
+// NamePadding returns padding for the name.
+func (c *Command) NamePadding() int {
+ if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen {
+ return minNamePadding
+ }
+ return c.parent.commandsMaxNameLen
+}
+
+// UsageTemplate returns usage template for the command.
+func (c *Command) UsageTemplate() string {
+ if c.usageTemplate != "" {
+ return c.usageTemplate
+ }
+
+ if c.HasParent() {
+ return c.parent.UsageTemplate()
+ }
+ return `Usage:{{if .Runnable}}
+ {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
+ {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
+
+Aliases:
+ {{.NameAndAliases}}{{end}}{{if .HasExample}}
+
+Examples:
+{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
+
+Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
+ {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
+
+Flags:
+{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
+
+Global Flags:
+{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}}
+
+Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
+ {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
+
+Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}}
+`
+}
+
+// HelpTemplate returns the help template for the command.
+func (c *Command) HelpTemplate() string {
+ if c.helpTemplate != "" {
+ return c.helpTemplate
+ }
+
+ if c.HasParent() {
+ return c.parent.HelpTemplate()
+ }
+ return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}}
+
+{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
+}
+
+// VersionTemplate returns the version template for the command.
+func (c *Command) VersionTemplate() string {
+ if c.versionTemplate != "" {
+ return c.versionTemplate
+ }
+
+ if c.HasParent() {
+ return c.parent.VersionTemplate()
+ }
+ return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}}
+`
+}
+
+func hasNoOptDefVal(name string, fs *flag.FlagSet) bool {
+ flag := fs.Lookup(name)
+ if flag == nil {
+ return false
+ }
+ return flag.NoOptDefVal != ""
+}
+
+func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool {
+ if len(name) == 0 {
+ return false
+ }
+
+ flag := fs.ShorthandLookup(name[:1])
+ if flag == nil {
+ return false
+ }
+ return flag.NoOptDefVal != ""
+}
+
+func stripFlags(args []string, c *Command) []string {
+ if len(args) == 0 {
+ return args
+ }
+ c.mergePersistentFlags()
+
+ commands := []string{}
+ flags := c.Flags()
+
+Loop:
+ for len(args) > 0 {
+ s := args[0]
+ args = args[1:]
+ switch {
+ case s == "--":
+ // "--" terminates the flags
+ break Loop
+ case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags):
+ // If '--flag arg' then
+ // delete arg from args.
+ fallthrough // (do the same as below)
+ case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags):
+ // If '-f arg' then
+ // delete 'arg' from args or break the loop if len(args) <= 1.
+ if len(args) <= 1 {
+ break Loop
+ } else {
+ args = args[1:]
+ continue
+ }
+ case s != "" && !strings.HasPrefix(s, "-"):
+ commands = append(commands, s)
+ }
+ }
+
+ return commands
+}
+
+// argsMinusFirstX removes only the first occurrence of x from args. Otherwise, commands that look like
+// openshift admin policy add-role-to-user admin my-user would lose the admin argument (arg[4]).
+func argsMinusFirstX(args []string, x string) []string {
+ for i, y := range args {
+ if x == y {
+ ret := []string{}
+ ret = append(ret, args[:i]...)
+ ret = append(ret, args[i+1:]...)
+ return ret
+ }
+ }
+ return args
+}
+
+func isFlagArg(arg string) bool {
+ return ((len(arg) >= 3 && arg[1] == '-') ||
+ (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-'))
+}
+
+// Find the target command given the args and command tree.
+// Meant to be run on the highest node. Only searches down.
+func (c *Command) Find(args []string) (*Command, []string, error) {
+ var innerfind func(*Command, []string) (*Command, []string)
+
+ innerfind = func(c *Command, innerArgs []string) (*Command, []string) {
+ argsWOflags := stripFlags(innerArgs, c)
+ if len(argsWOflags) == 0 {
+ return c, innerArgs
+ }
+ nextSubCmd := argsWOflags[0]
+
+ cmd := c.findNext(nextSubCmd)
+ if cmd != nil {
+ return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd))
+ }
+ return c, innerArgs
+ }
+
+ commandFound, a := innerfind(c, args)
+ if commandFound.Args == nil {
+ return commandFound, a, legacyArgs(commandFound, stripFlags(a, commandFound))
+ }
+ return commandFound, a, nil
+}
+
+func (c *Command) findSuggestions(arg string) string {
+ if c.DisableSuggestions {
+ return ""
+ }
+ if c.SuggestionsMinimumDistance <= 0 {
+ c.SuggestionsMinimumDistance = 2
+ }
+ suggestionsString := ""
+ if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 {
+ suggestionsString += "\n\nDid you mean this?\n"
+ for _, s := range suggestions {
+ suggestionsString += fmt.Sprintf("\t%v\n", s)
+ }
+ }
+ return suggestionsString
+}
+
+func (c *Command) findNext(next string) *Command {
+ matches := make([]*Command, 0)
+ for _, cmd := range c.commands {
+ if cmd.Name() == next || cmd.HasAlias(next) {
+ cmd.commandCalledAs.name = next
+ return cmd
+ }
+ if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) {
+ matches = append(matches, cmd)
+ }
+ }
+
+ if len(matches) == 1 {
+ return matches[0]
+ }
+
+ return nil
+}
+
+// Traverse the command tree to find the command, and parse args for
+// each parent.
+func (c *Command) Traverse(args []string) (*Command, []string, error) {
+ flags := []string{}
+ inFlag := false
+
+ for i, arg := range args {
+ switch {
+ // A long flag with a space separated value
+ case strings.HasPrefix(arg, "--") && !strings.Contains(arg, "="):
+ // TODO: this isn't quite right, we should really check ahead for 'true' or 'false'
+ inFlag = !hasNoOptDefVal(arg[2:], c.Flags())
+ flags = append(flags, arg)
+ continue
+ // A short flag with a space separated value
+ case strings.HasPrefix(arg, "-") && !strings.Contains(arg, "=") && len(arg) == 2 && !shortHasNoOptDefVal(arg[1:], c.Flags()):
+ inFlag = true
+ flags = append(flags, arg)
+ continue
+ // The value for a flag
+ case inFlag:
+ inFlag = false
+ flags = append(flags, arg)
+ continue
+ // A flag without a value, or with an `=` separated value
+ case isFlagArg(arg):
+ flags = append(flags, arg)
+ continue
+ }
+
+ cmd := c.findNext(arg)
+ if cmd == nil {
+ return c, args, nil
+ }
+
+ if err := c.ParseFlags(flags); err != nil {
+ return nil, args, err
+ }
+ return cmd.Traverse(args[i+1:])
+ }
+ return c, args, nil
+}
+
+// SuggestionsFor provides suggestions for the typedName.
+func (c *Command) SuggestionsFor(typedName string) []string {
+ suggestions := []string{}
+ for _, cmd := range c.commands {
+ if cmd.IsAvailableCommand() {
+ levenshteinDistance := ld(typedName, cmd.Name(), true)
+ suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance
+ suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName))
+ if suggestByLevenshtein || suggestByPrefix {
+ suggestions = append(suggestions, cmd.Name())
+ }
+ for _, explicitSuggestion := range cmd.SuggestFor {
+ if strings.EqualFold(typedName, explicitSuggestion) {
+ suggestions = append(suggestions, cmd.Name())
+ }
+ }
+ }
+ }
+ return suggestions
+}
+
+// VisitParents visits all parents of the command and invokes fn on each parent.
+func (c *Command) VisitParents(fn func(*Command)) {
+ if c.HasParent() {
+ fn(c.Parent())
+ c.Parent().VisitParents(fn)
+ }
+}
+
+// Root finds root command.
+func (c *Command) Root() *Command {
+ if c.HasParent() {
+ return c.Parent().Root()
+ }
+ return c
+}
+
+// ArgsLenAtDash will return the length of c.Flags().Args at the moment
+// when a -- was found during args parsing.
+func (c *Command) ArgsLenAtDash() int {
+ return c.Flags().ArgsLenAtDash()
+}
+
+func (c *Command) execute(a []string) (err error) {
+ if c == nil {
+ return fmt.Errorf("Called Execute() on a nil Command")
+ }
+
+ if len(c.Deprecated) > 0 {
+ c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated)
+ }
+
+ // initialize help and version flag at the last point possible to allow for user
+ // overriding
+ c.InitDefaultHelpFlag()
+ c.InitDefaultVersionFlag()
+
+ err = c.ParseFlags(a)
+ if err != nil {
+ return c.FlagErrorFunc()(c, err)
+ }
+
+ // If help is called, regardless of other flags, return that we want help.
+ // Also say we need help if the command isn't runnable.
+ helpVal, err := c.Flags().GetBool("help")
+ if err != nil {
+ // should be impossible to get here as we always declare a help
+ // flag in InitDefaultHelpFlag()
+ c.Println("\"help\" flag declared as non-bool. Please correct your code")
+ return err
+ }
+
+ if helpVal {
+ return flag.ErrHelp
+ }
+
+ // for back-compat, only add version flag behavior if version is defined
+ if c.Version != "" {
+ versionVal, err := c.Flags().GetBool("version")
+ if err != nil {
+ c.Println("\"version\" flag declared as non-bool. Please correct your code")
+ return err
+ }
+ if versionVal {
+ err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c)
+ if err != nil {
+ c.Println(err)
+ }
+ return err
+ }
+ }
+
+ if !c.Runnable() {
+ return flag.ErrHelp
+ }
+
+ c.preRun()
+
+ argWoFlags := c.Flags().Args()
+ if c.DisableFlagParsing {
+ argWoFlags = a
+ }
+
+ if err := c.ValidateArgs(argWoFlags); err != nil {
+ return err
+ }
+
+ for p := c; p != nil; p = p.Parent() {
+ if p.PersistentPreRunE != nil {
+ if err := p.PersistentPreRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ break
+ } else if p.PersistentPreRun != nil {
+ p.PersistentPreRun(c, argWoFlags)
+ break
+ }
+ }
+ if c.PreRunE != nil {
+ if err := c.PreRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else if c.PreRun != nil {
+ c.PreRun(c, argWoFlags)
+ }
+
+ if err := c.validateRequiredFlags(); err != nil {
+ return err
+ }
+ if c.RunE != nil {
+ if err := c.RunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else {
+ c.Run(c, argWoFlags)
+ }
+ if c.PostRunE != nil {
+ if err := c.PostRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else if c.PostRun != nil {
+ c.PostRun(c, argWoFlags)
+ }
+ for p := c; p != nil; p = p.Parent() {
+ if p.PersistentPostRunE != nil {
+ if err := p.PersistentPostRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ break
+ } else if p.PersistentPostRun != nil {
+ p.PersistentPostRun(c, argWoFlags)
+ break
+ }
+ }
+
+ return nil
+}
+
+func (c *Command) preRun() {
+ for _, x := range initializers {
+ x()
+ }
+}
+
+// ExecuteContext is the same as Execute(), but sets the ctx on the command.
+// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs
+// functions.
+func (c *Command) ExecuteContext(ctx context.Context) error {
+ c.ctx = ctx
+ return c.Execute()
+}
+
+// Execute uses the args (os.Args[1:] by default)
+// and runs through the command tree, finding appropriate matches
+// for commands and then corresponding flags.
+func (c *Command) Execute() error {
+ _, err := c.ExecuteC()
+ return err
+}
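+
+// exampleExecuteContext is an illustrative sketch added during review and is
+// not part of upstream cobra: it wires a context into command execution so
+// that RunE functions can observe cancellation through cmd.Context().
+func exampleExecuteContext(ctx context.Context) error {
+ root := &Command{
+ Use: "app",
+ RunE: func(cmd *Command, args []string) error {
+ // The context passed to ExecuteContext is visible here.
+ if err := cmd.Context().Err(); err != nil {
+ return err
+ }
+ fmt.Println("running", cmd.Name())
+ return nil
+ },
+ }
+ return root.ExecuteContext(ctx)
+}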
+
+// ExecuteContextC is the same as ExecuteC(), but sets the ctx on the command.
+// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs
+// functions.
+func (c *Command) ExecuteContextC(ctx context.Context) (*Command, error) {
+ c.ctx = ctx
+ return c.ExecuteC()
+}
+
+// ExecuteC executes the command.
+func (c *Command) ExecuteC() (cmd *Command, err error) {
+ if c.ctx == nil {
+ c.ctx = context.Background()
+ }
+
+ // Regardless of which command Execute is called on, run on Root only
+ if c.HasParent() {
+ return c.Root().ExecuteC()
+ }
+
+ // windows hook
+ if preExecHookFn != nil {
+ preExecHookFn(c)
+ }
+
+ // initialize help at the last point to allow for user overriding
+ c.InitDefaultHelpCmd()
+ // initialize completion at the last point to allow for user overriding
+ c.initDefaultCompletionCmd()
+
+ args := c.args
+
+ // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155
+ if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" {
+ args = os.Args[1:]
+ }
+
+ // initialize the hidden command to be used for shell completion
+ c.initCompleteCmd(args)
+
+ var flags []string
+ if c.TraverseChildren {
+ cmd, flags, err = c.Traverse(args)
+ } else {
+ cmd, flags, err = c.Find(args)
+ }
+ if err != nil {
+ // If we found and parsed a subcommand but it then failed, report the error against that subcommand
+ if cmd != nil {
+ c = cmd
+ }
+ if !c.SilenceErrors {
+ c.PrintErrln("Error:", err.Error())
+ c.PrintErrf("Run '%v --help' for usage.\n", c.CommandPath())
+ }
+ return c, err
+ }
+
+ cmd.commandCalledAs.called = true
+ if cmd.commandCalledAs.name == "" {
+ cmd.commandCalledAs.name = cmd.Name()
+ }
+
+ // We have to pass the global context to child commands
+ // if a context is present on the parent command.
+ if cmd.ctx == nil {
+ cmd.ctx = c.ctx
+ }
+
+ err = cmd.execute(flags)
+ if err != nil {
+ // Always show help if requested, even if SilenceErrors is in
+ // effect
+ if err == flag.ErrHelp {
+ cmd.HelpFunc()(cmd, args)
+ return cmd, nil
+ }
+
+ // If root command has SilenceErrors flagged,
+ // all subcommands should respect it
+ if !cmd.SilenceErrors && !c.SilenceErrors {
+ c.PrintErrln("Error:", err.Error())
+ }
+
+ // If root command has SilenceUsage flagged,
+ // all subcommands should respect it
+ if !cmd.SilenceUsage && !c.SilenceUsage {
+ c.Println(cmd.UsageString())
+ }
+ }
+ return cmd, err
+}
+
+func (c *Command) ValidateArgs(args []string) error {
+ if c.Args == nil {
+ return nil
+ }
+ return c.Args(c, args)
+}
+
+func (c *Command) validateRequiredFlags() error {
+ if c.DisableFlagParsing {
+ return nil
+ }
+
+ flags := c.Flags()
+ missingFlagNames := []string{}
+ flags.VisitAll(func(pflag *flag.Flag) {
+ requiredAnnotation, found := pflag.Annotations[BashCompOneRequiredFlag]
+ if !found {
+ return
+ }
+ if (requiredAnnotation[0] == "true") && !pflag.Changed {
+ missingFlagNames = append(missingFlagNames, pflag.Name)
+ }
+ })
+
+ if len(missingFlagNames) > 0 {
+ return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missingFlagNames, `", "`))
+ }
+ return nil
+}
+
+// InitDefaultHelpFlag adds a default help flag to c.
+// It is called automatically when c is executed or when help and usage are requested.
+// If c already has a help flag, it does nothing.
+func (c *Command) InitDefaultHelpFlag() {
+ c.mergePersistentFlags()
+ if c.Flags().Lookup("help") == nil {
+ usage := "help for "
+ if c.Name() == "" {
+ usage += "this command"
+ } else {
+ usage += c.Name()
+ }
+ c.Flags().BoolP("help", "h", false, usage)
+ }
+}
+
+// InitDefaultVersionFlag adds a default version flag to c.
+// It is called automatically when c is executed.
+// If c already has a version flag, it does nothing.
+// If c.Version is empty, it does nothing.
+func (c *Command) InitDefaultVersionFlag() {
+ if c.Version == "" {
+ return
+ }
+
+ c.mergePersistentFlags()
+ if c.Flags().Lookup("version") == nil {
+ usage := "version for "
+ if c.Name() == "" {
+ usage += "this command"
+ } else {
+ usage += c.Name()
+ }
+ if c.Flags().ShorthandLookup("v") == nil {
+ c.Flags().BoolP("version", "v", false, usage)
+ } else {
+ c.Flags().Bool("version", false, usage)
+ }
+ }
+}
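+
+// exampleVersionFlag is an illustrative sketch added during review and is not
+// part of upstream cobra: because Version is non-empty, Execute will add a
+// --version boolean flag (and -v, since no other shorthand uses it) that
+// prints the output of VersionTemplate.
+func exampleVersionFlag() *Command {
+ return &Command{
+ Use: "app",
+ Version: "0.1.0",
+ Run: func(cmd *Command, args []string) {},
+ }
+}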
+
+// InitDefaultHelpCmd adds a default help command to c.
+// It is called automatically when c is executed or when help and usage are requested.
+// If c already has a help command, or c has no subcommands, it does nothing.
+func (c *Command) InitDefaultHelpCmd() {
+ if !c.HasSubCommands() {
+ return
+ }
+
+ if c.helpCommand == nil {
+ c.helpCommand = &Command{
+ Use: "help [command]",
+ Short: "Help about any command",
+ Long: `Help provides help for any command in the application.
+Simply type ` + c.Name() + ` help [path to command] for full details.`,
+ ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+ var completions []string
+ cmd, _, e := c.Root().Find(args)
+ if e != nil {
+ return nil, ShellCompDirectiveNoFileComp
+ }
+ if cmd == nil {
+ // Root help command.
+ cmd = c.Root()
+ }
+ for _, subCmd := range cmd.Commands() {
+ if subCmd.IsAvailableCommand() || subCmd == cmd.helpCommand {
+ if strings.HasPrefix(subCmd.Name(), toComplete) {
+ completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short))
+ }
+ }
+ }
+ return completions, ShellCompDirectiveNoFileComp
+ },
+ Run: func(c *Command, args []string) {
+ cmd, _, e := c.Root().Find(args)
+ if cmd == nil || e != nil {
+ c.Printf("Unknown help topic %#q\n", args)
+ CheckErr(c.Root().Usage())
+ } else {
+ cmd.InitDefaultHelpFlag() // make it possible for the 'help' flag to be shown
+ CheckErr(cmd.Help())
+ }
+ },
+ }
+ }
+ c.RemoveCommand(c.helpCommand)
+ c.AddCommand(c.helpCommand)
+}
+
+// ResetCommands deletes the parent, subcommands, and help command from c.
+func (c *Command) ResetCommands() {
+ c.parent = nil
+ c.commands = nil
+ c.helpCommand = nil
+ c.parentsPflags = nil
+}
+
+// Sorts commands by their names.
+type commandSorterByName []*Command
+
+func (c commandSorterByName) Len() int { return len(c) }
+func (c commandSorterByName) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
+func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() }
+
+// Commands returns a sorted slice of child commands.
+func (c *Command) Commands() []*Command {
+ // do not sort commands if they are already sorted or sorting was disabled
+ if EnableCommandSorting && !c.commandsAreSorted {
+ sort.Sort(commandSorterByName(c.commands))
+ c.commandsAreSorted = true
+ }
+ return c.commands
+}
+
+// AddCommand adds one or more commands to this parent command.
+func (c *Command) AddCommand(cmds ...*Command) {
+ for i, x := range cmds {
+ if cmds[i] == c {
+ panic("Command can't be a child of itself")
+ }
+ cmds[i].parent = c
+ // update max lengths
+ usageLen := len(x.Use)
+ if usageLen > c.commandsMaxUseLen {
+ c.commandsMaxUseLen = usageLen
+ }
+ commandPathLen := len(x.CommandPath())
+ if commandPathLen > c.commandsMaxCommandPathLen {
+ c.commandsMaxCommandPathLen = commandPathLen
+ }
+ nameLen := len(x.Name())
+ if nameLen > c.commandsMaxNameLen {
+ c.commandsMaxNameLen = nameLen
+ }
+ // If global normalization function exists, update all children
+ if c.globNormFunc != nil {
+ x.SetGlobalNormalizationFunc(c.globNormFunc)
+ }
+ c.commands = append(c.commands, x)
+ c.commandsAreSorted = false
+ }
+}
+
+// RemoveCommand removes one or more commands from a parent command.
+func (c *Command) RemoveCommand(cmds ...*Command) {
+ commands := []*Command{}
+main:
+ for _, command := range c.commands {
+ for _, cmd := range cmds {
+ if command == cmd {
+ command.parent = nil
+ continue main
+ }
+ }
+ commands = append(commands, command)
+ }
+ c.commands = commands
+ // recompute all lengths
+ c.commandsMaxUseLen = 0
+ c.commandsMaxCommandPathLen = 0
+ c.commandsMaxNameLen = 0
+ for _, command := range c.commands {
+ usageLen := len(command.Use)
+ if usageLen > c.commandsMaxUseLen {
+ c.commandsMaxUseLen = usageLen
+ }
+ commandPathLen := len(command.CommandPath())
+ if commandPathLen > c.commandsMaxCommandPathLen {
+ c.commandsMaxCommandPathLen = commandPathLen
+ }
+ nameLen := len(command.Name())
+ if nameLen > c.commandsMaxNameLen {
+ c.commandsMaxNameLen = nameLen
+ }
+ }
+}
+
+// Print is a convenience method to Print to the defined output, fallback to Stderr if not set.
+func (c *Command) Print(i ...interface{}) {
+ fmt.Fprint(c.OutOrStderr(), i...)
+}
+
+// Println is a convenience method to Println to the defined output, fallback to Stderr if not set.
+func (c *Command) Println(i ...interface{}) {
+ c.Print(fmt.Sprintln(i...))
+}
+
+// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set.
+func (c *Command) Printf(format string, i ...interface{}) {
+ c.Print(fmt.Sprintf(format, i...))
+}
+
+// PrintErr is a convenience method to Print to the defined Err output, fallback to Stderr if not set.
+func (c *Command) PrintErr(i ...interface{}) {
+ fmt.Fprint(c.ErrOrStderr(), i...)
+}
+
+// PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set.
+func (c *Command) PrintErrln(i ...interface{}) {
+ c.PrintErr(fmt.Sprintln(i...))
+}
+
+// PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set.
+func (c *Command) PrintErrf(format string, i ...interface{}) {
+ c.PrintErr(fmt.Sprintf(format, i...))
+}
+
+// CommandPath returns the full path to this command.
+func (c *Command) CommandPath() string {
+ if c.HasParent() {
+ return c.Parent().CommandPath() + " " + c.Name()
+ }
+ return c.Name()
+}
+
+// UseLine puts out the full usage for a given command (including parents).
+func (c *Command) UseLine() string {
+ var useline string
+ if c.HasParent() {
+ useline = c.parent.CommandPath() + " " + c.Use
+ } else {
+ useline = c.Use
+ }
+ if c.DisableFlagsInUseLine {
+ return useline
+ }
+ if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") {
+ useline += " [flags]"
+ }
+ return useline
+}
+
+// DebugFlags is used to determine which flags have been assigned to which commands
+// and which persist.
+func (c *Command) DebugFlags() {
+ c.Println("DebugFlags called on", c.Name())
+ var debugflags func(*Command)
+
+ debugflags = func(x *Command) {
+ if x.HasFlags() || x.HasPersistentFlags() {
+ c.Println(x.Name())
+ }
+ if x.HasFlags() {
+ x.flags.VisitAll(func(f *flag.Flag) {
+ if x.HasPersistentFlags() && x.persistentFlag(f.Name) != nil {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]")
+ } else {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]")
+ }
+ })
+ }
+ if x.HasPersistentFlags() {
+ x.pflags.VisitAll(func(f *flag.Flag) {
+ if x.HasFlags() {
+ if x.flags.Lookup(f.Name) == nil {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]")
+ }
+ } else {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]")
+ }
+ })
+ }
+ c.Println(x.flagErrorBuf)
+ if x.HasSubCommands() {
+ for _, y := range x.commands {
+ debugflags(y)
+ }
+ }
+ }
+
+ debugflags(c)
+}
+
+// Name returns the command's name: the first word in the use line.
+func (c *Command) Name() string {
+ name := c.Use
+ i := strings.Index(name, " ")
+ if i >= 0 {
+ name = name[:i]
+ }
+ return name
+}
+
+// HasAlias determines if a given string is an alias of the command.
+func (c *Command) HasAlias(s string) bool {
+ for _, a := range c.Aliases {
+ if a == s {
+ return true
+ }
+ }
+ return false
+}
+
+// CalledAs returns the command name or alias that was used to invoke
+// this command or an empty string if the command has not been called.
+func (c *Command) CalledAs() string {
+ if c.commandCalledAs.called {
+ return c.commandCalledAs.name
+ }
+ return ""
+}
+
+// hasNameOrAliasPrefix returns true if the Name or any of the aliases
+// starts with the given prefix.
+func (c *Command) hasNameOrAliasPrefix(prefix string) bool {
+ if strings.HasPrefix(c.Name(), prefix) {
+ c.commandCalledAs.name = c.Name()
+ return true
+ }
+ for _, alias := range c.Aliases {
+ if strings.HasPrefix(alias, prefix) {
+ c.commandCalledAs.name = alias
+ return true
+ }
+ }
+ return false
+}
+
+// NameAndAliases returns a list of the command name and all aliases
+func (c *Command) NameAndAliases() string {
+ return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ")
+}
+
+// HasExample determines if the command has an example.
+func (c *Command) HasExample() bool {
+ return len(c.Example) > 0
+}
+
+// Runnable determines if the command is itself runnable.
+func (c *Command) Runnable() bool {
+ return c.Run != nil || c.RunE != nil
+}
+
+// HasSubCommands determines if the command has child commands.
+func (c *Command) HasSubCommands() bool {
+ return len(c.commands) > 0
+}
+
+// IsAvailableCommand determines if a command is available as a non-help command
+// (this includes all non deprecated/hidden commands).
+func (c *Command) IsAvailableCommand() bool {
+ if len(c.Deprecated) != 0 || c.Hidden {
+ return false
+ }
+
+ if c.HasParent() && c.Parent().helpCommand == c {
+ return false
+ }
+
+ if c.Runnable() || c.HasAvailableSubCommands() {
+ return true
+ }
+
+ return false
+}
+
+// IsAdditionalHelpTopicCommand determines if a command is an additional
+// help topic command; additional help topic command is determined by the
+// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that
+// are runnable/hidden/deprecated.
+// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924.
+func (c *Command) IsAdditionalHelpTopicCommand() bool {
+ // if a command is runnable, deprecated, or hidden it is not a 'help' command
+ if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden {
+ return false
+ }
+
+ // if any non-help sub commands are found, the command is not a 'help' command
+ for _, sub := range c.commands {
+ if !sub.IsAdditionalHelpTopicCommand() {
+ return false
+ }
+ }
+
+ // the command either has no sub commands, or no non-help sub commands
+ return true
+}
+
+// HasHelpSubCommands determines if a command has any available 'help' sub commands
+// that need to be shown in the usage/help default template under 'additional help
+// topics'.
+func (c *Command) HasHelpSubCommands() bool {
+ // return true on the first found available 'help' sub command
+ for _, sub := range c.commands {
+ if sub.IsAdditionalHelpTopicCommand() {
+ return true
+ }
+ }
+
+ // the command either has no sub commands, or no available 'help' sub commands
+ return false
+}
+
+// HasAvailableSubCommands determines if a command has available sub commands that
+// need to be shown in the usage/help default template under 'available commands'.
+func (c *Command) HasAvailableSubCommands() bool {
+ // return true on the first found available (non deprecated/help/hidden)
+ // sub command
+ for _, sub := range c.commands {
+ if sub.IsAvailableCommand() {
+ return true
+ }
+ }
+
+ // the command either has no sub commands, or no available (non deprecated/help/hidden)
+ // sub commands
+ return false
+}
+
+// HasParent determines if the command is a child command.
+func (c *Command) HasParent() bool {
+ return c.parent != nil
+}
+
+// GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist.
+func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName {
+ return c.globNormFunc
+}
+
+// Flags returns the complete FlagSet that applies
+// to this command (local and persistent declared here and by all parents).
+func (c *Command) Flags() *flag.FlagSet {
+ if c.flags == nil {
+ c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.flags.SetOutput(c.flagErrorBuf)
+ }
+
+ return c.flags
+}
+
+// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands.
+func (c *Command) LocalNonPersistentFlags() *flag.FlagSet {
+ persistentFlags := c.PersistentFlags()
+
+ out := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.LocalFlags().VisitAll(func(f *flag.Flag) {
+ if persistentFlags.Lookup(f.Name) == nil {
+ out.AddFlag(f)
+ }
+ })
+ return out
+}
+
+// LocalFlags returns the local FlagSet specifically set in the current command.
+func (c *Command) LocalFlags() *flag.FlagSet {
+ c.mergePersistentFlags()
+
+ if c.lflags == nil {
+ c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.lflags.SetOutput(c.flagErrorBuf)
+ }
+ c.lflags.SortFlags = c.Flags().SortFlags
+ if c.globNormFunc != nil {
+ c.lflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ addToLocal := func(f *flag.Flag) {
+ if c.lflags.Lookup(f.Name) == nil && c.parentsPflags.Lookup(f.Name) == nil {
+ c.lflags.AddFlag(f)
+ }
+ }
+ c.Flags().VisitAll(addToLocal)
+ c.PersistentFlags().VisitAll(addToLocal)
+ return c.lflags
+}
+
+// InheritedFlags returns all flags which were inherited from parent commands.
+func (c *Command) InheritedFlags() *flag.FlagSet {
+ c.mergePersistentFlags()
+
+ if c.iflags == nil {
+ c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.iflags.SetOutput(c.flagErrorBuf)
+ }
+
+ local := c.LocalFlags()
+ if c.globNormFunc != nil {
+ c.iflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ c.parentsPflags.VisitAll(func(f *flag.Flag) {
+ if c.iflags.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil {
+ c.iflags.AddFlag(f)
+ }
+ })
+ return c.iflags
+}
+
+// NonInheritedFlags returns all flags which were not inherited from parent commands.
+func (c *Command) NonInheritedFlags() *flag.FlagSet {
+ return c.LocalFlags()
+}
+
+// PersistentFlags returns the persistent FlagSet specifically set in the current command.
+func (c *Command) PersistentFlags() *flag.FlagSet {
+ if c.pflags == nil {
+ c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.pflags.SetOutput(c.flagErrorBuf)
+ }
+ return c.pflags
+}
+
+// ResetFlags deletes all flags from command.
+func (c *Command) ResetFlags() {
+ c.flagErrorBuf = new(bytes.Buffer)
+ c.flagErrorBuf.Reset()
+ c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.flags.SetOutput(c.flagErrorBuf)
+ c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.pflags.SetOutput(c.flagErrorBuf)
+
+ c.lflags = nil
+ c.iflags = nil
+ c.parentsPflags = nil
+}
+
+// HasFlags checks if the command contains any flags (local plus persistent from the entire structure).
+func (c *Command) HasFlags() bool {
+ return c.Flags().HasFlags()
+}
+
+// HasPersistentFlags checks if the command contains persistent flags.
+func (c *Command) HasPersistentFlags() bool {
+ return c.PersistentFlags().HasFlags()
+}
+
+// HasLocalFlags checks if the command has flags specifically declared locally.
+func (c *Command) HasLocalFlags() bool {
+ return c.LocalFlags().HasFlags()
+}
+
+// HasInheritedFlags checks if the command has flags inherited from its parent command.
+func (c *Command) HasInheritedFlags() bool {
+ return c.InheritedFlags().HasFlags()
+}
+
+// HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire
+// structure) which are not hidden or deprecated.
+func (c *Command) HasAvailableFlags() bool {
+ return c.Flags().HasAvailableFlags()
+}
+
+// HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated.
+func (c *Command) HasAvailablePersistentFlags() bool {
+ return c.PersistentFlags().HasAvailableFlags()
+}
+
+// HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden
+// or deprecated.
+func (c *Command) HasAvailableLocalFlags() bool {
+ return c.LocalFlags().HasAvailableFlags()
+}
+
+// HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are
+// not hidden or deprecated.
+func (c *Command) HasAvailableInheritedFlags() bool {
+ return c.InheritedFlags().HasAvailableFlags()
+}
+
+// Flag climbs up the command tree looking for a matching flag.
+func (c *Command) Flag(name string) (flag *flag.Flag) {
+ flag = c.Flags().Lookup(name)
+
+ if flag == nil {
+ flag = c.persistentFlag(name)
+ }
+
+ return
+}
+
+// Recursively find matching persistent flag.
+func (c *Command) persistentFlag(name string) (flag *flag.Flag) {
+ if c.HasPersistentFlags() {
+ flag = c.PersistentFlags().Lookup(name)
+ }
+
+ if flag == nil {
+ c.updateParentsPflags()
+ flag = c.parentsPflags.Lookup(name)
+ }
+ return
+}
+
+// ParseFlags parses persistent flag tree and local flags.
+func (c *Command) ParseFlags(args []string) error {
+ if c.DisableFlagParsing {
+ return nil
+ }
+
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ beforeErrorBufLen := c.flagErrorBuf.Len()
+ c.mergePersistentFlags()
+
+ // do it here after merging all flags and just before parse
+ c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist)
+
+ err := c.Flags().Parse(args)
+ // Print warnings if they occurred (e.g. deprecated flag messages).
+ if c.flagErrorBuf.Len()-beforeErrorBufLen > 0 && err == nil {
+ c.Print(c.flagErrorBuf.String())
+ }
+
+ return err
+}
+
+// Parent returns a command's parent command.
+func (c *Command) Parent() *Command {
+ return c.parent
+}
+
+// mergePersistentFlags merges c.PersistentFlags() to c.Flags()
+// and adds missing persistent flags of all parents.
+func (c *Command) mergePersistentFlags() {
+ c.updateParentsPflags()
+ c.Flags().AddFlagSet(c.PersistentFlags())
+ c.Flags().AddFlagSet(c.parentsPflags)
+}
+
+// updateParentsPflags updates c.parentsPflags by adding
+// new persistent flags of all parents.
+// If c.parentsPflags == nil, it makes new.
+func (c *Command) updateParentsPflags() {
+ if c.parentsPflags == nil {
+ c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.parentsPflags.SetOutput(c.flagErrorBuf)
+ c.parentsPflags.SortFlags = false
+ }
+
+ if c.globNormFunc != nil {
+ c.parentsPflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ c.Root().PersistentFlags().AddFlagSet(flag.CommandLine)
+
+ c.VisitParents(func(parent *Command) {
+ c.parentsPflags.AddFlagSet(parent.PersistentFlags())
+ })
+}
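The methods above split a command's flags into local, persistent, and inherited sets, which mergePersistentFlags() combines just before parsing. As a hedged usage sketch (not part of the vendored file; the `app` and `serve` names are invented), this is how those sets typically end up being populated by a calling program:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "app"}
	serve := &cobra.Command{
		Use: "serve",
		Run: func(cmd *cobra.Command, args []string) {
			// --config reaches this command through InheritedFlags(),
			// --port lives in its LocalNonPersistentFlags().
			cfg, _ := cmd.Flags().GetString("config")
			port, _ := cmd.Flags().GetInt("port")
			fmt.Println(cfg, port)
		},
	}

	// Persistent flags propagate to child commands via updateParentsPflags().
	root.PersistentFlags().String("config", "", "path to config file")
	// Local flags stay on the command that declared them.
	serve.Flags().Int("port", 8080, "port to listen on")

	root.AddCommand(serve)
	_ = root.Execute()
}
```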
diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go
new file mode 100644
index 000000000..6159c1cc1
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command_notwin.go
@@ -0,0 +1,5 @@
+// +build !windows
+
+package cobra
+
+var preExecHookFn func(*Command)
diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go
new file mode 100644
index 000000000..8768b1736
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command_win.go
@@ -0,0 +1,26 @@
+// +build windows
+
+package cobra
+
+import (
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/inconshreveable/mousetrap"
+)
+
+var preExecHookFn = preExecHook
+
+func preExecHook(c *Command) {
+ if MousetrapHelpText != "" && mousetrap.StartedByExplorer() {
+ c.Print(MousetrapHelpText)
+ if MousetrapDisplayDuration > 0 {
+ time.Sleep(MousetrapDisplayDuration)
+ } else {
+ c.Println("Press return to continue...")
+ fmt.Scanln()
+ }
+ os.Exit(1)
+ }
+}
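preExecHook only fires on Windows, when mousetrap detects that the binary was started by double-clicking it in Explorer; it prints MousetrapHelpText and exits. A minimal sketch of customizing that message (using the package-level cobra variables this file relies on):

```go
package main

import (
	"time"

	"github.com/spf13/cobra"
)

func main() {
	// Shown (then the process exits) when the binary is launched from Explorer
	// instead of a terminal; only relevant on Windows builds.
	cobra.MousetrapHelpText = "This is a command line tool.\nPlease run it from a terminal such as PowerShell or cmd.exe.\n"
	cobra.MousetrapDisplayDuration = 5 * time.Second

	root := &cobra.Command{Use: "app"}
	_ = root.Execute()
}
```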
diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go
new file mode 100644
index 000000000..b849b9c84
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/completions.go
@@ -0,0 +1,781 @@
+package cobra
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "sync"
+
+ "github.com/spf13/pflag"
+)
+
+const (
+ // ShellCompRequestCmd is the name of the hidden command that is used to request
+ // completion results from the program. It is used by the shell completion scripts.
+ ShellCompRequestCmd = "__complete"
+ // ShellCompNoDescRequestCmd is the name of the hidden command that is used to request
+ // completion results without their description. It is used by the shell completion scripts.
+ ShellCompNoDescRequestCmd = "__completeNoDesc"
+)
+
+// Global map of flag completion functions. Make sure to use flagCompletionMutex before you try to read and write from it.
+var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){}
+
+// lock for reading and writing from flagCompletionFunctions
+var flagCompletionMutex = &sync.RWMutex{}
+
+// ShellCompDirective is a bit map representing the different behaviors the shell
+// can be instructed to have once completions have been provided.
+type ShellCompDirective int
+
+type flagCompError struct {
+ subCommand string
+ flagName string
+}
+
+func (e *flagCompError) Error() string {
+ return "Subcommand '" + e.subCommand + "' does not support flag '" + e.flagName + "'"
+}
+
+const (
+ // ShellCompDirectiveError indicates an error occurred and completions should be ignored.
+ ShellCompDirectiveError ShellCompDirective = 1 << iota
+
+ // ShellCompDirectiveNoSpace indicates that the shell should not add a space
+ // after the completion even if there is a single completion provided.
+ ShellCompDirectiveNoSpace
+
+ // ShellCompDirectiveNoFileComp indicates that the shell should not provide
+ // file completion even when no completion is provided.
+ ShellCompDirectiveNoFileComp
+
+ // ShellCompDirectiveFilterFileExt indicates that the provided completions
+ // should be used as file extension filters.
+ // For flags, using Command.MarkFlagFilename() and Command.MarkPersistentFlagFilename()
+ // is a shortcut to using this directive explicitly. The BashCompFilenameExt
+ // annotation can also be used to obtain the same behavior for flags.
+ ShellCompDirectiveFilterFileExt
+
+ // ShellCompDirectiveFilterDirs indicates that only directory names should
+ // be provided in file completion. To request directory names within another
+ // directory, the returned completions should specify the directory within
+ // which to search. The BashCompSubdirsInDir annotation can be used to
+ // obtain the same behavior but only for flags.
+ ShellCompDirectiveFilterDirs
+
+ // ===========================================================================
+
+ // All directives using iota should be above this one.
+ // For internal use.
+ shellCompDirectiveMaxValue
+
+ // ShellCompDirectiveDefault indicates to let the shell perform its default
+ // behavior after completions have been provided.
+ // This one must be last to avoid messing up the iota count.
+ ShellCompDirectiveDefault ShellCompDirective = 0
+)
+
+const (
+ // Constants for the completion command
+ compCmdName = "completion"
+ compCmdNoDescFlagName = "no-descriptions"
+ compCmdNoDescFlagDesc = "disable completion descriptions"
+ compCmdNoDescFlagDefault = false
+)
+
+// CompletionOptions are the options to control shell completion
+type CompletionOptions struct {
+ // DisableDefaultCmd prevents Cobra from creating a default 'completion' command
+ DisableDefaultCmd bool
+ // DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag
+ // for shells that support completion descriptions
+ DisableNoDescFlag bool
+ // DisableDescriptions turns off all completion descriptions for shells
+ // that support them
+ DisableDescriptions bool
+}
+
+// NoFileCompletions can be used to disable file completion for commands that should
+// not trigger file completions.
+func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+ return nil, ShellCompDirectiveNoFileComp
+}
+
+// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag.
+func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error {
+ flag := c.Flag(flagName)
+ if flag == nil {
+ return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName)
+ }
+ flagCompletionMutex.Lock()
+ defer flagCompletionMutex.Unlock()
+
+ if _, exists := flagCompletionFunctions[flag]; exists {
+ return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName)
+ }
+ flagCompletionFunctions[flag] = f
+ return nil
+}
+
+// Returns a string listing the different directives enabled in the specified parameter
+func (d ShellCompDirective) string() string {
+ var directives []string
+ if d&ShellCompDirectiveError != 0 {
+ directives = append(directives, "ShellCompDirectiveError")
+ }
+ if d&ShellCompDirectiveNoSpace != 0 {
+ directives = append(directives, "ShellCompDirectiveNoSpace")
+ }
+ if d&ShellCompDirectiveNoFileComp != 0 {
+ directives = append(directives, "ShellCompDirectiveNoFileComp")
+ }
+ if d&ShellCompDirectiveFilterFileExt != 0 {
+ directives = append(directives, "ShellCompDirectiveFilterFileExt")
+ }
+ if d&ShellCompDirectiveFilterDirs != 0 {
+ directives = append(directives, "ShellCompDirectiveFilterDirs")
+ }
+ if len(directives) == 0 {
+ directives = append(directives, "ShellCompDirectiveDefault")
+ }
+
+ if d >= shellCompDirectiveMaxValue {
+ return fmt.Sprintf("ERROR: unexpected ShellCompDirective value: %d", d)
+ }
+ return strings.Join(directives, ", ")
+}
+
+// Adds a special hidden command that can be used to request custom completions.
+func (c *Command) initCompleteCmd(args []string) {
+ completeCmd := &Command{
+ Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd),
+ Aliases: []string{ShellCompNoDescRequestCmd},
+ DisableFlagsInUseLine: true,
+ Hidden: true,
+ DisableFlagParsing: true,
+ Args: MinimumNArgs(1),
+ Short: "Request shell completion choices for the specified command-line",
+ Long: fmt.Sprintf("%[2]s is a special command that is used by the shell completion logic\n%[1]s",
+ "to request completion choices for the specified command-line.", ShellCompRequestCmd),
+ Run: func(cmd *Command, args []string) {
+ finalCmd, completions, directive, err := cmd.getCompletions(args)
+ if err != nil {
+ CompErrorln(err.Error())
+ // Keep going for multiple reasons:
+ // 1- There could be some valid completions even though there was an error
+ // 2- Even without completions, we need to print the directive
+ }
+
+ noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd)
+ for _, comp := range completions {
+ if noDescriptions {
+ // Remove any description that may be included following a tab character.
+ comp = strings.Split(comp, "\t")[0]
+ }
+
+ // Make sure we only write the first line to the output.
+ // This is needed if a description contains a linebreak.
+ // Otherwise the shell scripts will interpret the other lines as new flags
+ // and could therefore provide a wrong completion.
+ comp = strings.Split(comp, "\n")[0]
+
+ // Finally trim the completion. This is especially important to get rid
+				// of a trailing tab when there is no description following it.
+ // For example, a sub-command without a description should not be completed
+ // with a tab at the end (or else zsh will show a -- following it
+ // although there is no description).
+ comp = strings.TrimSpace(comp)
+
+ // Print each possible completion to stdout for the completion script to consume.
+ fmt.Fprintln(finalCmd.OutOrStdout(), comp)
+ }
+
+ // As the last printout, print the completion directive for the completion script to parse.
+			// The directive integer must be the last character following a single colon (:).
+			// The completion script expects :<directive>
+ fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive)
+
+ // Print some helpful info to stderr for the user to understand.
+ // Output from stderr must be ignored by the completion script.
+ fmt.Fprintf(finalCmd.ErrOrStderr(), "Completion ended with directive: %s\n", directive.string())
+ },
+ }
+ c.AddCommand(completeCmd)
+ subCmd, _, err := c.Find(args)
+ if err != nil || subCmd.Name() != ShellCompRequestCmd {
+ // Only create this special command if it is actually being called.
+ // This reduces possible side-effects of creating such a command;
+ // for example, having this command would cause problems to a
+ // cobra program that only consists of the root command, since this
+ // command would cause the root command to suddenly have a subcommand.
+ c.RemoveCommand(completeCmd)
+ }
+}
+
+func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) {
+ // The last argument, which is not completely typed by the user,
+ // should not be part of the list of arguments
+ toComplete := args[len(args)-1]
+ trimmedArgs := args[:len(args)-1]
+
+ var finalCmd *Command
+ var finalArgs []string
+ var err error
+ // Find the real command for which completion must be performed
+ // check if we need to traverse here to parse local flags on parent commands
+ if c.Root().TraverseChildren {
+ finalCmd, finalArgs, err = c.Root().Traverse(trimmedArgs)
+ } else {
+ finalCmd, finalArgs, err = c.Root().Find(trimmedArgs)
+ }
+ if err != nil {
+ // Unable to find the real command. E.g., someInvalidCmd
+ return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs)
+ }
+ finalCmd.ctx = c.ctx
+
+ // Check if we are doing flag value completion before parsing the flags.
+ // This is important because if we are completing a flag value, we need to also
+ // remove the flag name argument from the list of finalArgs or else the parsing
+ // could fail due to an invalid value (incomplete) for the flag.
+ flag, finalArgs, toComplete, flagErr := checkIfFlagCompletion(finalCmd, finalArgs, toComplete)
+
+ // Check if interspersed is false or -- was set on a previous arg.
+ // This works by counting the arguments. Normally -- is not counted as arg but
+ // if -- was already set or interspersed is false and there is already one arg then
+ // the extra added -- is counted as arg.
+ flagCompletion := true
+ _ = finalCmd.ParseFlags(append(finalArgs, "--"))
+ newArgCount := finalCmd.Flags().NArg()
+
+ // Parse the flags early so we can check if required flags are set
+ if err = finalCmd.ParseFlags(finalArgs); err != nil {
+ return finalCmd, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error())
+ }
+
+ realArgCount := finalCmd.Flags().NArg()
+ if newArgCount > realArgCount {
+ // don't do flag completion (see above)
+ flagCompletion = false
+ }
+ // Error while attempting to parse flags
+ if flagErr != nil {
+ // If error type is flagCompError and we don't want flagCompletion we should ignore the error
+ if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) {
+ return finalCmd, []string{}, ShellCompDirectiveDefault, flagErr
+ }
+ }
+
+ if flag != nil && flagCompletion {
+ // Check if we are completing a flag value subject to annotations
+ if validExts, present := flag.Annotations[BashCompFilenameExt]; present {
+ if len(validExts) != 0 {
+ // File completion filtered by extensions
+ return finalCmd, validExts, ShellCompDirectiveFilterFileExt, nil
+ }
+
+ // The annotation requests simple file completion. There is no reason to do
+ // that since it is the default behavior anyway. Let's ignore this annotation
+ // in case the program also registered a completion function for this flag.
+ // Even though it is a mistake on the program's side, let's be nice when we can.
+ }
+
+ if subDir, present := flag.Annotations[BashCompSubdirsInDir]; present {
+ if len(subDir) == 1 {
+ // Directory completion from within a directory
+ return finalCmd, subDir, ShellCompDirectiveFilterDirs, nil
+ }
+ // Directory completion
+ return finalCmd, []string{}, ShellCompDirectiveFilterDirs, nil
+ }
+ }
+
+ // When doing completion of a flag name, as soon as an argument starts with
+ // a '-' we know it is a flag. We cannot use isFlagArg() here as it requires
+ // the flag name to be complete
+ if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") && flagCompletion {
+ var completions []string
+
+ // First check for required flags
+ completions = completeRequireFlags(finalCmd, toComplete)
+
+ // If we have not found any required flags, only then can we show regular flags
+ if len(completions) == 0 {
+ doCompleteFlags := func(flag *pflag.Flag) {
+ if !flag.Changed ||
+ strings.Contains(flag.Value.Type(), "Slice") ||
+ strings.Contains(flag.Value.Type(), "Array") {
+ // If the flag is not already present, or if it can be specified multiple times (Array or Slice)
+ // we suggest it as a completion
+ completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
+ }
+ }
+
+ // We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands
+ // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and
+ // non-inherited flags.
+ finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ doCompleteFlags(flag)
+ })
+ finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ doCompleteFlags(flag)
+ })
+ }
+
+ directive := ShellCompDirectiveNoFileComp
+ if len(completions) == 1 && strings.HasSuffix(completions[0], "=") {
+ // If there is a single completion, the shell usually adds a space
+ // after the completion. We don't want that if the flag ends with an =
+ directive = ShellCompDirectiveNoSpace
+ }
+ return finalCmd, completions, directive, nil
+ }
+
+ // We only remove the flags from the arguments if DisableFlagParsing is not set.
+ // This is important for commands which have requested to do their own flag completion.
+ if !finalCmd.DisableFlagParsing {
+ finalArgs = finalCmd.Flags().Args()
+ }
+
+ var completions []string
+ directive := ShellCompDirectiveDefault
+ if flag == nil {
+ foundLocalNonPersistentFlag := false
+ // If TraverseChildren is true on the root command we don't check for
+ // local flags because we can use a local flag on a parent command
+ if !finalCmd.Root().TraverseChildren {
+ // Check if there are any local, non-persistent flags on the command-line
+ localNonPersistentFlags := finalCmd.LocalNonPersistentFlags()
+ finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed {
+ foundLocalNonPersistentFlag = true
+ }
+ })
+ }
+
+ // Complete subcommand names, including the help command
+ if len(finalArgs) == 0 && !foundLocalNonPersistentFlag {
+ // We only complete sub-commands if:
+ // - there are no arguments on the command-line and
+ // - there are no local, non-persistent flags on the command-line or TraverseChildren is true
+ for _, subCmd := range finalCmd.Commands() {
+ if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand {
+ if strings.HasPrefix(subCmd.Name(), toComplete) {
+ completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short))
+ }
+ directive = ShellCompDirectiveNoFileComp
+ }
+ }
+ }
+
+ // Complete required flags even without the '-' prefix
+ completions = append(completions, completeRequireFlags(finalCmd, toComplete)...)
+
+ // Always complete ValidArgs, even if we are completing a subcommand name.
+ // This is for commands that have both subcommands and ValidArgs.
+ if len(finalCmd.ValidArgs) > 0 {
+ if len(finalArgs) == 0 {
+ // ValidArgs are only for the first argument
+ for _, validArg := range finalCmd.ValidArgs {
+ if strings.HasPrefix(validArg, toComplete) {
+ completions = append(completions, validArg)
+ }
+ }
+ directive = ShellCompDirectiveNoFileComp
+
+ // If no completions were found within commands or ValidArgs,
+ // see if there are any ArgAliases that should be completed.
+ if len(completions) == 0 {
+ for _, argAlias := range finalCmd.ArgAliases {
+ if strings.HasPrefix(argAlias, toComplete) {
+ completions = append(completions, argAlias)
+ }
+ }
+ }
+ }
+
+ // If there are ValidArgs specified (even if they don't match), we stop completion.
+ // Only one of ValidArgs or ValidArgsFunction can be used for a single command.
+ return finalCmd, completions, directive, nil
+ }
+
+ // Let the logic continue so as to add any ValidArgsFunction completions,
+ // even if we already found sub-commands.
+ // This is for commands that have subcommands but also specify a ValidArgsFunction.
+ }
+
+ // Find the completion function for the flag or command
+ var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)
+ if flag != nil && flagCompletion {
+ flagCompletionMutex.RLock()
+ completionFn = flagCompletionFunctions[flag]
+ flagCompletionMutex.RUnlock()
+ } else {
+ completionFn = finalCmd.ValidArgsFunction
+ }
+ if completionFn != nil {
+ // Go custom completion defined for this flag or command.
+ // Call the registered completion function to get the completions.
+ var comps []string
+ comps, directive = completionFn(finalCmd, finalArgs, toComplete)
+ completions = append(completions, comps...)
+ }
+
+ return finalCmd, completions, directive, nil
+}
+
+func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string {
+ if nonCompletableFlag(flag) {
+ return []string{}
+ }
+
+ var completions []string
+ flagName := "--" + flag.Name
+ if strings.HasPrefix(flagName, toComplete) {
+ // Flag without the =
+ completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
+
+ // Why suggest both long forms: --flag and --flag= ?
+ // This forces the user to *always* have to type either an = or a space after the flag name.
+ // Let's be nice and avoid making users have to do that.
+ // Since boolean flags and shortname flags don't show the = form, let's go that route and never show it.
+ // The = form will still work, we just won't suggest it.
+ // This also makes the list of suggested flags shorter as we avoid all the = forms.
+ //
+ // if len(flag.NoOptDefVal) == 0 {
+ // // Flag requires a value, so it can be suffixed with =
+ // flagName += "="
+ // completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
+ // }
+ }
+
+ flagName = "-" + flag.Shorthand
+ if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) {
+ completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
+ }
+
+ return completions
+}
+
+func completeRequireFlags(finalCmd *Command, toComplete string) []string {
+ var completions []string
+
+ doCompleteRequiredFlags := func(flag *pflag.Flag) {
+ if _, present := flag.Annotations[BashCompOneRequiredFlag]; present {
+ if !flag.Changed {
+ // If the flag is not already present, we suggest it as a completion
+ completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
+ }
+ }
+ }
+
+ // We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands
+ // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and
+ // non-inherited flags.
+ finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ doCompleteRequiredFlags(flag)
+ })
+ finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ doCompleteRequiredFlags(flag)
+ })
+
+ return completions
+}
+
+func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*pflag.Flag, []string, string, error) {
+ if finalCmd.DisableFlagParsing {
+ // We only do flag completion if we are allowed to parse flags
+ // This is important for commands which have requested to do their own flag completion.
+ return nil, args, lastArg, nil
+ }
+
+ var flagName string
+ trimmedArgs := args
+ flagWithEqual := false
+ orgLastArg := lastArg
+
+ // When doing completion of a flag name, as soon as an argument starts with
+ // a '-' we know it is a flag. We cannot use isFlagArg() here as that function
+ // requires the flag name to be complete
+ if len(lastArg) > 0 && lastArg[0] == '-' {
+ if index := strings.Index(lastArg, "="); index >= 0 {
+ // Flag with an =
+ if strings.HasPrefix(lastArg[:index], "--") {
+ // Flag has full name
+ flagName = lastArg[2:index]
+ } else {
+ // Flag is shorthand
+ // We have to get the last shorthand flag name
+ // e.g. `-asd` => d to provide the correct completion
+ // https://github.com/spf13/cobra/issues/1257
+ flagName = lastArg[index-1 : index]
+ }
+ lastArg = lastArg[index+1:]
+ flagWithEqual = true
+ } else {
+ // Normal flag completion
+ return nil, args, lastArg, nil
+ }
+ }
+
+ if len(flagName) == 0 {
+ if len(args) > 0 {
+ prevArg := args[len(args)-1]
+ if isFlagArg(prevArg) {
+ // Only consider the case where the flag does not contain an =.
+ // If the flag contains an = it means it has already been fully processed,
+ // so we don't need to deal with it here.
+ if index := strings.Index(prevArg, "="); index < 0 {
+ if strings.HasPrefix(prevArg, "--") {
+ // Flag has full name
+ flagName = prevArg[2:]
+ } else {
+ // Flag is shorthand
+ // We have to get the last shorthand flag name
+ // e.g. `-asd` => d to provide the correct completion
+ // https://github.com/spf13/cobra/issues/1257
+ flagName = prevArg[len(prevArg)-1:]
+ }
+ // Remove the uncompleted flag or else there could be an error created
+ // for an invalid value for that flag
+ trimmedArgs = args[:len(args)-1]
+ }
+ }
+ }
+ }
+
+ if len(flagName) == 0 {
+ // Not doing flag completion
+ return nil, trimmedArgs, lastArg, nil
+ }
+
+ flag := findFlag(finalCmd, flagName)
+ if flag == nil {
+ // Flag not supported by this command, the interspersed option might be set so return the original args
+ return nil, args, orgLastArg, &flagCompError{subCommand: finalCmd.Name(), flagName: flagName}
+ }
+
+ if !flagWithEqual {
+ if len(flag.NoOptDefVal) != 0 {
+ // We had assumed dealing with a two-word flag but the flag is a boolean flag.
+ // In that case, there is no value following it, so we are not really doing flag completion.
+ // Reset everything to do noun completion.
+ trimmedArgs = args
+ flag = nil
+ }
+ }
+
+ return flag, trimmedArgs, lastArg, nil
+}
+
+// initDefaultCompletionCmd adds a default 'completion' command to c.
+// This function will do nothing if any of the following is true:
+// 1- the feature has been explicitly disabled by the program,
+// 2- c has no subcommands (to avoid creating one),
+// 3- c already has a 'completion' command provided by the program.
+func (c *Command) initDefaultCompletionCmd() {
+ if c.CompletionOptions.DisableDefaultCmd || !c.HasSubCommands() {
+ return
+ }
+
+ for _, cmd := range c.commands {
+ if cmd.Name() == compCmdName || cmd.HasAlias(compCmdName) {
+ // A completion command is already available
+ return
+ }
+ }
+
+ haveNoDescFlag := !c.CompletionOptions.DisableNoDescFlag && !c.CompletionOptions.DisableDescriptions
+
+ completionCmd := &Command{
+ Use: compCmdName,
+ Short: "generate the autocompletion script for the specified shell",
+ Long: fmt.Sprintf(`
+Generate the autocompletion script for %[1]s for the specified shell.
+See each sub-command's help for details on how to use the generated script.
+`, c.Root().Name()),
+ Args: NoArgs,
+ ValidArgsFunction: NoFileCompletions,
+ }
+ c.AddCommand(completionCmd)
+
+ out := c.OutOrStdout()
+ noDesc := c.CompletionOptions.DisableDescriptions
+ shortDesc := "generate the autocompletion script for %s"
+ bash := &Command{
+ Use: "bash",
+ Short: fmt.Sprintf(shortDesc, "bash"),
+ Long: fmt.Sprintf(`
+Generate the autocompletion script for the bash shell.
+
+This script depends on the 'bash-completion' package.
+If it is not installed already, you can install it via your OS's package manager.
+
+To load completions in your current shell session:
+$ source <(%[1]s completion bash)
+
+To load completions for every new session, execute once:
+Linux:
+ $ %[1]s completion bash > /etc/bash_completion.d/%[1]s
+MacOS:
+ $ %[1]s completion bash > /usr/local/etc/bash_completion.d/%[1]s
+
+You will need to start a new shell for this setup to take effect.
+ `, c.Root().Name()),
+ Args: NoArgs,
+ DisableFlagsInUseLine: true,
+ ValidArgsFunction: NoFileCompletions,
+ RunE: func(cmd *Command, args []string) error {
+ return cmd.Root().GenBashCompletionV2(out, !noDesc)
+ },
+ }
+ if haveNoDescFlag {
+ bash.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
+ }
+
+ zsh := &Command{
+ Use: "zsh",
+ Short: fmt.Sprintf(shortDesc, "zsh"),
+ Long: fmt.Sprintf(`
+Generate the autocompletion script for the zsh shell.
+
+If shell completion is not already enabled in your environment you will need
+to enable it. You can execute the following once:
+
+$ echo "autoload -U compinit; compinit" >> ~/.zshrc
+
+To load completions for every new session, execute once:
+# Linux:
+$ %[1]s completion zsh > "${fpath[1]}/_%[1]s"
+# macOS:
+$ %[1]s completion zsh > /usr/local/share/zsh/site-functions/_%[1]s
+
+You will need to start a new shell for this setup to take effect.
+`, c.Root().Name()),
+ Args: NoArgs,
+ ValidArgsFunction: NoFileCompletions,
+ RunE: func(cmd *Command, args []string) error {
+ if noDesc {
+ return cmd.Root().GenZshCompletionNoDesc(out)
+ }
+ return cmd.Root().GenZshCompletion(out)
+ },
+ }
+ if haveNoDescFlag {
+ zsh.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
+ }
+
+ fish := &Command{
+ Use: "fish",
+ Short: fmt.Sprintf(shortDesc, "fish"),
+ Long: fmt.Sprintf(`
+Generate the autocompletion script for the fish shell.
+
+To load completions in your current shell session:
+$ %[1]s completion fish | source
+
+To load completions for every new session, execute once:
+$ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish
+
+You will need to start a new shell for this setup to take effect.
+`, c.Root().Name()),
+ Args: NoArgs,
+ ValidArgsFunction: NoFileCompletions,
+ RunE: func(cmd *Command, args []string) error {
+ return cmd.Root().GenFishCompletion(out, !noDesc)
+ },
+ }
+ if haveNoDescFlag {
+ fish.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
+ }
+
+ powershell := &Command{
+ Use: "powershell",
+ Short: fmt.Sprintf(shortDesc, "powershell"),
+ Long: fmt.Sprintf(`
+Generate the autocompletion script for powershell.
+
+To load completions in your current shell session:
+PS C:\> %[1]s completion powershell | Out-String | Invoke-Expression
+
+To load completions for every new session, add the output of the above command
+to your powershell profile.
+`, c.Root().Name()),
+ Args: NoArgs,
+ ValidArgsFunction: NoFileCompletions,
+ RunE: func(cmd *Command, args []string) error {
+ if noDesc {
+ return cmd.Root().GenPowerShellCompletion(out)
+ }
+ return cmd.Root().GenPowerShellCompletionWithDesc(out)
+
+ },
+ }
+ if haveNoDescFlag {
+ powershell.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
+ }
+
+ completionCmd.AddCommand(bash, zsh, fish, powershell)
+}
+
+func findFlag(cmd *Command, name string) *pflag.Flag {
+ flagSet := cmd.Flags()
+ if len(name) == 1 {
+ // First convert the short flag into a long flag
+ // as the cmd.Flag() search only accepts long flags
+ if short := flagSet.ShorthandLookup(name); short != nil {
+ name = short.Name
+ } else {
+ set := cmd.InheritedFlags()
+ if short = set.ShorthandLookup(name); short != nil {
+ name = short.Name
+ } else {
+ return nil
+ }
+ }
+ }
+ return cmd.Flag(name)
+}
+
+// CompDebug prints the specified string to the same file as where the
+// completion script prints its logs.
+// Note that completion printouts should never be on stdout as they would
+// be wrongly interpreted as actual completion choices by the completion script.
+func CompDebug(msg string, printToStdErr bool) {
+ msg = fmt.Sprintf("[Debug] %s", msg)
+
+ // Such logs are only printed when the user has set the environment
+ // variable BASH_COMP_DEBUG_FILE to the path of some file to be used.
+ if path := os.Getenv("BASH_COMP_DEBUG_FILE"); path != "" {
+ f, err := os.OpenFile(path,
+ os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err == nil {
+ defer f.Close()
+ WriteStringAndCheck(f, msg)
+ }
+ }
+
+ if printToStdErr {
+ // Must print to stderr for this not to be read by the completion script.
+ fmt.Fprint(os.Stderr, msg)
+ }
+}
+
+// CompDebugln prints the specified string with a newline at the end
+// to the same file as where the completion script prints its logs.
+// Such logs are only printed when the user has set the environment
+// variable BASH_COMP_DEBUG_FILE to the path of some file to be used.
+func CompDebugln(msg string, printToStdErr bool) {
+ CompDebug(fmt.Sprintf("%s\n", msg), printToStdErr)
+}
+
+// CompError prints the specified completion message to stderr.
+func CompError(msg string) {
+ msg = fmt.Sprintf("[Error] %s", msg)
+ CompDebug(msg, true)
+}
+
+// CompErrorln prints the specified completion message to stderr with a newline at the end.
+func CompErrorln(msg string) {
+ CompError(fmt.Sprintf("%s\n", msg))
+}
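getCompletions() above answers the hidden `__complete` request by combining sub-command names, ValidArgs/ValidArgsFunction results, and flag-value functions registered through RegisterFlagCompletionFunc. A hedged sketch of how a program plugs into that machinery (the `get` command and its candidate values are invented for illustration):

```go
package main

import "github.com/spf13/cobra"

func main() {
	root := &cobra.Command{Use: "app"}

	get := &cobra.Command{
		Use: "get [resource]",
		// Positional-argument candidates consumed by getCompletions().
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return []string{"users", "posts", "media"}, cobra.ShellCompDirectiveNoFileComp
		},
		Run: func(cmd *cobra.Command, args []string) {},
	}
	get.Flags().String("output", "table", "output format")

	// Flag-value candidates, looked up in flagCompletionFunctions.
	_ = get.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"table", "json", "yaml"}, cobra.ShellCompDirectiveNoFileComp
	})

	root.AddCommand(get)
	_ = root.Execute()
}
```

Running `app __complete get ''` would then print the three resource names followed by a directive line such as `:4` (ShellCompDirectiveNoFileComp), which the shell scripts parse as described above.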
diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go
new file mode 100644
index 000000000..bb57fd568
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/fish_completions.go
@@ -0,0 +1,219 @@
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+func genFishComp(buf io.StringWriter, name string, includeDesc bool) {
+ // Variables should not contain a '-' or ':' character
+ nameForVar := name
+ nameForVar = strings.Replace(nameForVar, "-", "_", -1)
+ nameForVar = strings.Replace(nameForVar, ":", "_", -1)
+
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name))
+ WriteStringAndCheck(buf, fmt.Sprintf(`
+function __%[1]s_debug
+ set -l file "$BASH_COMP_DEBUG_FILE"
+ if test -n "$file"
+ echo "$argv" >> $file
+ end
+end
+
+function __%[1]s_perform_completion
+ __%[1]s_debug "Starting __%[1]s_perform_completion"
+
+ # Extract all args except the last one
+ set -l args (commandline -opc)
+ # Extract the last arg and escape it in case it is a space
+ set -l lastArg (string escape -- (commandline -ct))
+
+ __%[1]s_debug "args: $args"
+ __%[1]s_debug "last arg: $lastArg"
+
+ set -l requestComp "$args[1] %[3]s $args[2..-1] $lastArg"
+
+ __%[1]s_debug "Calling $requestComp"
+ set -l results (eval $requestComp 2> /dev/null)
+
+ # Some programs may output extra empty lines after the directive.
+ # Let's ignore them or else it will break completion.
+ # Ref: https://github.com/spf13/cobra/issues/1279
+ for line in $results[-1..1]
+ if test (string trim -- $line) = ""
+ # Found an empty line, remove it
+ set results $results[1..-2]
+ else
+ # Found non-empty line, we have our proper output
+ break
+ end
+ end
+
+ set -l comps $results[1..-2]
+ set -l directiveLine $results[-1]
+
+ # For Fish, when completing a flag with an = (e.g., -n=)
+ # completions must be prefixed with the flag
+ set -l flagPrefix (string match -r -- '-.*=' "$lastArg")
+
+ __%[1]s_debug "Comps: $comps"
+ __%[1]s_debug "DirectiveLine: $directiveLine"
+ __%[1]s_debug "flagPrefix: $flagPrefix"
+
+ for comp in $comps
+ printf "%%s%%s\n" "$flagPrefix" "$comp"
+ end
+
+ printf "%%s\n" "$directiveLine"
+end
+
+# This function does two things:
+# - Obtain the completions and store them in the global __%[1]s_comp_results
+# - Return false if file completion should be performed
+function __%[1]s_prepare_completions
+ __%[1]s_debug ""
+ __%[1]s_debug "========= starting completion logic =========="
+
+ # Start fresh
+ set --erase __%[1]s_comp_results
+
+ set -l results (__%[1]s_perform_completion)
+ __%[1]s_debug "Completion results: $results"
+
+ if test -z "$results"
+ __%[1]s_debug "No completion, probably due to a failure"
+ # Might as well do file completion, in case it helps
+ return 1
+ end
+
+ set -l directive (string sub --start 2 $results[-1])
+ set --global __%[1]s_comp_results $results[1..-2]
+
+ __%[1]s_debug "Completions are: $__%[1]s_comp_results"
+ __%[1]s_debug "Directive is: $directive"
+
+ set -l shellCompDirectiveError %[4]d
+ set -l shellCompDirectiveNoSpace %[5]d
+ set -l shellCompDirectiveNoFileComp %[6]d
+ set -l shellCompDirectiveFilterFileExt %[7]d
+ set -l shellCompDirectiveFilterDirs %[8]d
+
+ if test -z "$directive"
+ set directive 0
+ end
+
+ set -l compErr (math (math --scale 0 $directive / $shellCompDirectiveError) %% 2)
+ if test $compErr -eq 1
+ __%[1]s_debug "Received error directive: aborting."
+ # Might as well do file completion, in case it helps
+ return 1
+ end
+
+ set -l filefilter (math (math --scale 0 $directive / $shellCompDirectiveFilterFileExt) %% 2)
+ set -l dirfilter (math (math --scale 0 $directive / $shellCompDirectiveFilterDirs) %% 2)
+ if test $filefilter -eq 1; or test $dirfilter -eq 1
+ __%[1]s_debug "File extension filtering or directory filtering not supported"
+ # Do full file completion instead
+ return 1
+ end
+
+ set -l nospace (math (math --scale 0 $directive / $shellCompDirectiveNoSpace) %% 2)
+ set -l nofiles (math (math --scale 0 $directive / $shellCompDirectiveNoFileComp) %% 2)
+
+ __%[1]s_debug "nospace: $nospace, nofiles: $nofiles"
+
+ # If we want to prevent a space, or if file completion is NOT disabled,
+ # we need to count the number of valid completions.
+ # To do so, we will filter on prefix as the completions we have received
+ # may not already be filtered so as to allow fish to match on different
+ # criteria than the prefix.
+ if test $nospace -ne 0; or test $nofiles -eq 0
+ set -l prefix (commandline -t | string escape --style=regex)
+ __%[1]s_debug "prefix: $prefix"
+
+ set -l completions (string match -r -- "^$prefix.*" $__%[1]s_comp_results)
+ set --global __%[1]s_comp_results $completions
+ __%[1]s_debug "Filtered completions are: $__%[1]s_comp_results"
+
+ # Important not to quote the variable for count to work
+ set -l numComps (count $__%[1]s_comp_results)
+ __%[1]s_debug "numComps: $numComps"
+
+ if test $numComps -eq 1; and test $nospace -ne 0
+ # We must first split on \t to get rid of the descriptions to be
+ # able to check what the actual completion will be.
+ # We don't need descriptions anyway since there is only a single
+ # real completion which the shell will expand immediately.
+ set -l split (string split --max 1 \t $__%[1]s_comp_results[1])
+
+ # Fish won't add a space if the completion ends with any
+ # of the following characters: @=/:.,
+ set -l lastChar (string sub -s -1 -- $split)
+ if not string match -r -q "[@=/:.,]" -- "$lastChar"
+ # In other cases, to support the "nospace" directive we trick the shell
+ # by outputting an extra, longer completion.
+ __%[1]s_debug "Adding second completion to perform nospace directive"
+ set --global __%[1]s_comp_results $split[1] $split[1].
+ __%[1]s_debug "Completions are now: $__%[1]s_comp_results"
+ end
+ end
+
+ if test $numComps -eq 0; and test $nofiles -eq 0
+ # To be consistent with bash and zsh, we only trigger file
+ # completion when there are no other completions
+ __%[1]s_debug "Requesting file completion"
+ return 1
+ end
+ end
+
+ return 0
+end
+
+# Since Fish completions are only loaded once the user triggers them, we trigger them ourselves
+# so we can properly delete any completions provided by another script.
+# Only do this if the program can be found, or else fish may print some errors; besides,
+# the existing completions will only be loaded if the program can be found.
+if type -q "%[2]s"
+ # The space after the program name is essential to trigger completion for the program
+ # and not completion of the program name itself.
+ # Also, we use '> /dev/null 2>&1' since '&>' is not supported in older versions of fish.
+ complete --do-complete "%[2]s " > /dev/null 2>&1
+end
+
+# Remove any pre-existing completions for the program since we will be handling all of them.
+complete -c %[2]s -e
+
+# The call to __%[1]s_prepare_completions will setup __%[1]s_comp_results
+# which provides the program's completion choices.
+complete -c %[2]s -n '__%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results'
+
+`, nameForVar, name, compCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+}
+
+// GenFishCompletion generates fish completion file and writes to the passed writer.
+func (c *Command) GenFishCompletion(w io.Writer, includeDesc bool) error {
+ buf := new(bytes.Buffer)
+ genFishComp(buf, c.Name(), includeDesc)
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+// GenFishCompletionFile generates fish completion file.
+func (c *Command) GenFishCompletionFile(filename string, includeDesc bool) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenFishCompletion(outFile, includeDesc)
+}
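GenFishCompletion and GenFishCompletionFile simply render the script above for the command's own name. A brief usage sketch (the output path is illustrative only):

```go
package main

import (
	"log"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "app"}

	// Write the fish script, with descriptions, to stdout...
	if err := root.GenFishCompletion(os.Stdout, true); err != nil {
		log.Fatal(err)
	}

	// ...or to a file that can be dropped into ~/.config/fish/completions/.
	if err := root.GenFishCompletionFile("/tmp/app.fish", true); err != nil {
		log.Fatal(err)
	}
}
```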
diff --git a/vendor/github.com/spf13/cobra/fish_completions.md b/vendor/github.com/spf13/cobra/fish_completions.md
new file mode 100644
index 000000000..19b2ed129
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/fish_completions.md
@@ -0,0 +1,4 @@
+## Generating Fish Completions For Your cobra.Command
+
+Please refer to [Shell Completions](shell_completions.md) for details.
+
diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go
new file mode 100644
index 000000000..59234c09f
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/powershell_completions.go
@@ -0,0 +1,285 @@
+// The generated scripts require PowerShell v5.0+ (which comes with Windows 10, but
+// can be downloaded separately for Windows 7 or 8.1).
+
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) {
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(`# powershell completion for %-36[1]s -*- shell-script -*-
+
+function __%[1]s_debug {
+ if ($env:BASH_COMP_DEBUG_FILE) {
+ "$args" | Out-File -Append -FilePath "$env:BASH_COMP_DEBUG_FILE"
+ }
+}
+
+filter __%[1]s_escapeStringWithSpecialChars {
+`+" $_ -replace '\\s|#|@|\\$|;|,|''|\\{|\\}|\\(|\\)|\"|`|\\||<|>|&','`$&'"+`
+}
+
+Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock {
+ param(
+ $WordToComplete,
+ $CommandAst,
+ $CursorPosition
+ )
+
+ # Get the current command line and convert into a string
+ $Command = $CommandAst.CommandElements
+ $Command = "$Command"
+
+ __%[1]s_debug ""
+ __%[1]s_debug "========= starting completion logic =========="
+ __%[1]s_debug "WordToComplete: $WordToComplete Command: $Command CursorPosition: $CursorPosition"
+
+ # The user could have moved the cursor backwards on the command-line.
+ # We need to trigger completion from the $CursorPosition location, so we need
+ # to truncate the command-line ($Command) up to the $CursorPosition location.
+    # Make sure the $Command is longer than the $CursorPosition before we truncate.
+ # This happens because the $Command does not include the last space.
+ if ($Command.Length -gt $CursorPosition) {
+ $Command=$Command.Substring(0,$CursorPosition)
+ }
+ __%[1]s_debug "Truncated command: $Command"
+
+ $ShellCompDirectiveError=%[3]d
+ $ShellCompDirectiveNoSpace=%[4]d
+ $ShellCompDirectiveNoFileComp=%[5]d
+ $ShellCompDirectiveFilterFileExt=%[6]d
+ $ShellCompDirectiveFilterDirs=%[7]d
+
+ # Prepare the command to request completions for the program.
+ # Split the command at the first space to separate the program and arguments.
+ $Program,$Arguments = $Command.Split(" ",2)
+ $RequestComp="$Program %[2]s $Arguments"
+ __%[1]s_debug "RequestComp: $RequestComp"
+
+ # we cannot use $WordToComplete because it
+ # has the wrong values if the cursor was moved
+ # so use the last argument
+ if ($WordToComplete -ne "" ) {
+ $WordToComplete = $Arguments.Split(" ")[-1]
+ }
+ __%[1]s_debug "New WordToComplete: $WordToComplete"
+
+
+ # Check for flag with equal sign
+ $IsEqualFlag = ($WordToComplete -Like "--*=*" )
+ if ( $IsEqualFlag ) {
+ __%[1]s_debug "Completing equal sign flag"
+ # Remove the flag part
+ $Flag,$WordToComplete = $WordToComplete.Split("=",2)
+ }
+
+ if ( $WordToComplete -eq "" -And ( -Not $IsEqualFlag )) {
+ # If the last parameter is complete (there is a space following it)
+ # We add an extra empty parameter so we can indicate this to the go method.
+ __%[1]s_debug "Adding extra empty parameter"
+`+" # We need to use `\"`\" to pass an empty argument a \"\" or '' does not work!!!"+`
+`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+`
+ }
+
+ __%[1]s_debug "Calling $RequestComp"
+    # Call the command, store the output in $Out, and redirect stderr and stdout to null.
+    # $Out is an array containing one line per element
+ Invoke-Expression -OutVariable out "$RequestComp" 2>&1 | Out-Null
+
+
+ # get directive from last line
+ [int]$Directive = $Out[-1].TrimStart(':')
+ if ($Directive -eq "") {
+ # There is no directive specified
+ $Directive = 0
+ }
+ __%[1]s_debug "The completion directive is: $Directive"
+
+ # remove directive (last element) from out
+ $Out = $Out | Where-Object { $_ -ne $Out[-1] }
+ __%[1]s_debug "The completions are: $Out"
+
+ if (($Directive -band $ShellCompDirectiveError) -ne 0 ) {
+ # Error code. No completion.
+ __%[1]s_debug "Received error from custom completion go code"
+ return
+ }
+
+ $Longest = 0
+ $Values = $Out | ForEach-Object {
+ #Split the output in name and description
+`+" $Name, $Description = $_.Split(\"`t\",2)"+`
+ __%[1]s_debug "Name: $Name Description: $Description"
+
+ # Look for the longest completion so that we can format things nicely
+ if ($Longest -lt $Name.Length) {
+ $Longest = $Name.Length
+ }
+
+ # Set the description to a one space string if there is none set.
+ # This is needed because the CompletionResult does not accept an empty string as argument
+ if (-Not $Description) {
+ $Description = " "
+ }
+ @{Name="$Name";Description="$Description"}
+ }
+
+
+ $Space = " "
+ if (($Directive -band $ShellCompDirectiveNoSpace) -ne 0 ) {
+ # remove the space here
+ __%[1]s_debug "ShellCompDirectiveNoSpace is called"
+ $Space = ""
+ }
+
+ if ((($Directive -band $ShellCompDirectiveFilterFileExt) -ne 0 ) -or
+ (($Directive -band $ShellCompDirectiveFilterDirs) -ne 0 )) {
+ __%[1]s_debug "ShellCompDirectiveFilterFileExt ShellCompDirectiveFilterDirs are not supported"
+
+ # return here to prevent the completion of the extensions
+ return
+ }
+
+ $Values = $Values | Where-Object {
+ # filter the result
+ $_.Name -like "$WordToComplete*"
+
+ # Join the flag back if we have an equal sign flag
+ if ( $IsEqualFlag ) {
+ __%[1]s_debug "Join the equal sign flag back to the completion value"
+ $_.Name = $Flag + "=" + $_.Name
+ }
+ }
+
+ if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) {
+ __%[1]s_debug "ShellCompDirectiveNoFileComp is called"
+
+ if ($Values.Length -eq 0) {
+ # Just print an empty string here so the
+ # shell does not start to complete paths.
+ # We cannot use CompletionResult here because
+ # it does not accept an empty string as argument.
+ ""
+ return
+ }
+ }
+
+ # Get the current mode
+ $Mode = (Get-PSReadLineKeyHandler | Where-Object {$_.Key -eq "Tab" }).Function
+ __%[1]s_debug "Mode: $Mode"
+
+ $Values | ForEach-Object {
+
+ # store temporary because switch will overwrite $_
+ $comp = $_
+
+ # PowerShell supports three different completion modes
+ # - TabCompleteNext (default windows style - on each key press the next option is displayed)
+ # - Complete (works like bash)
+ # - MenuComplete (works like zsh)
+ # You set the mode with Set-PSReadLineKeyHandler -Key Tab -Function
+
+ # CompletionResult Arguments:
+ # 1) CompletionText text to be used as the auto completion result
+ # 2) ListItemText text to be displayed in the suggestion list
+ # 3) ResultType type of completion result
+ # 4) ToolTip text for the tooltip with details about the object
+
+ switch ($Mode) {
+
+ # bash like
+ "Complete" {
+
+ if ($Values.Length -eq 1) {
+ __%[1]s_debug "Only one completion left"
+
+ # insert space after value
+ [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+
+ } else {
+ # Add the proper number of spaces to align the descriptions
+ while($comp.Name.Length -lt $Longest) {
+ $comp.Name = $comp.Name + " "
+ }
+
+ # Check for empty description and only add parentheses if needed
+ if ($($comp.Description) -eq " " ) {
+ $Description = ""
+ } else {
+ $Description = " ($($comp.Description))"
+ }
+
+ [System.Management.Automation.CompletionResult]::new("$($comp.Name)$Description", "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)")
+ }
+ }
+
+ # zsh like
+ "MenuComplete" {
+ # insert space after value
+ # MenuComplete will automatically show the ToolTip of
+ # the highlighted value at the bottom of the suggestions.
+ [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+ }
+
+ # TabCompleteNext and in case we get something unknown
+ Default {
+ # Like MenuComplete but we don't want to add a space here because
+                # the user needs to press space anyway to get the completion.
+                # Description will not be shown because that's not possible with TabCompleteNext
+ [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+ }
+ }
+
+ }
+}
+`, name, compCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+}
+
+func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error {
+ buf := new(bytes.Buffer)
+ genPowerShellComp(buf, c.Name(), includeDesc)
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func (c *Command) genPowerShellCompletionFile(filename string, includeDesc bool) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.genPowerShellCompletion(outFile, includeDesc)
+}
+
+// GenPowerShellCompletionFile generates powershell completion file without descriptions.
+func (c *Command) GenPowerShellCompletionFile(filename string) error {
+ return c.genPowerShellCompletionFile(filename, false)
+}
+
+// GenPowerShellCompletion generates powershell completion file without descriptions
+// and writes it to the passed writer.
+func (c *Command) GenPowerShellCompletion(w io.Writer) error {
+ return c.genPowerShellCompletion(w, false)
+}
+
+// GenPowerShellCompletionFileWithDesc generates powershell completion file with descriptions.
+func (c *Command) GenPowerShellCompletionFileWithDesc(filename string) error {
+ return c.genPowerShellCompletionFile(filename, true)
+}
+
+// GenPowerShellCompletionWithDesc generates powershell completion file with descriptions
+// and writes it to the passed writer.
+func (c *Command) GenPowerShellCompletionWithDesc(w io.Writer) error {
+ return c.genPowerShellCompletion(w, true)
+}
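The PowerShell generators follow the same pattern as the fish ones; a short sketch of emitting the script with descriptions enabled:

```go
package main

import (
	"log"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "app"}

	// Descriptions are rendered as tooltips or suffixes depending on the
	// PSReadLine completion mode detected by the generated script.
	if err := root.GenPowerShellCompletionWithDesc(os.Stdout); err != nil {
		log.Fatal(err)
	}
}
```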
diff --git a/vendor/github.com/spf13/cobra/powershell_completions.md b/vendor/github.com/spf13/cobra/powershell_completions.md
new file mode 100644
index 000000000..c449f1e5c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/powershell_completions.md
@@ -0,0 +1,3 @@
+# Generating PowerShell Completions For Your Own cobra.Command
+
+Please refer to [Shell Completions](shell_completions.md#powershell-completions) for details.
diff --git a/vendor/github.com/spf13/cobra/projects_using_cobra.md b/vendor/github.com/spf13/cobra/projects_using_cobra.md
new file mode 100644
index 000000000..d98a71e36
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/projects_using_cobra.md
@@ -0,0 +1,38 @@
+## Projects using Cobra
+
+- [Arduino CLI](https://github.com/arduino/arduino-cli)
+- [Bleve](http://www.blevesearch.com/)
+- [CockroachDB](http://www.cockroachlabs.com/)
+- [Cosmos SDK](https://github.com/cosmos/cosmos-sdk)
+- [Delve](https://github.com/derekparker/delve)
+- [Docker (distribution)](https://github.com/docker/distribution)
+- [Etcd](https://etcd.io/)
+- [Gardener](https://github.com/gardener/gardenctl)
+- [Giant Swarm's gsctl](https://github.com/giantswarm/gsctl)
+- [Git Bump](https://github.com/erdaltsksn/git-bump)
+- [Github CLI](https://github.com/cli/cli)
+- [GitHub Labeler](https://github.com/erdaltsksn/gh-label)
+- [Golangci-lint](https://golangci-lint.run)
+- [GopherJS](http://www.gopherjs.org/)
+- [Helm](https://helm.sh)
+- [Hugo](https://gohugo.io)
+- [Istio](https://istio.io)
+- [Kool](https://github.com/kool-dev/kool)
+- [Kubernetes](http://kubernetes.io/)
+- [Linkerd](https://linkerd.io/)
+- [Mattermost-server](https://github.com/mattermost/mattermost-server)
+- [Metal Stack CLI](https://github.com/metal-stack/metalctl)
+- [Moby (former Docker)](https://github.com/moby/moby)
+- [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
+- [OpenShift](https://www.openshift.com/)
+- [Ory Hydra](https://github.com/ory/hydra)
+- [Ory Kratos](https://github.com/ory/kratos)
+- [Pouch](https://github.com/alibaba/pouch)
+- [ProjectAtomic (enterprise)](http://www.projectatomic.io/)
+- [Prototool](https://github.com/uber/prototool)
+- [Random](https://github.com/erdaltsksn/random)
+- [Rclone](https://rclone.org/)
+- [Skaffold](https://skaffold.dev/)
+- [Tendermint](https://github.com/tendermint/tendermint)
+- [Twitch CLI](https://github.com/twitchdev/twitch-cli)
+- [Werf](https://werf.io/)
diff --git a/vendor/github.com/spf13/cobra/shell_completions.go b/vendor/github.com/spf13/cobra/shell_completions.go
new file mode 100644
index 000000000..d99bf91e5
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/shell_completions.go
@@ -0,0 +1,84 @@
+package cobra
+
+import (
+ "github.com/spf13/pflag"
+)
+
+// MarkFlagRequired instructs the various shell completion implementations to
+// prioritize the named flag when performing completion,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkFlagRequired(name string) error {
+ return MarkFlagRequired(c.Flags(), name)
+}
+
+// MarkPersistentFlagRequired instructs the various shell completion implementations to
+// prioritize the named persistent flag when performing completion,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkPersistentFlagRequired(name string) error {
+ return MarkFlagRequired(c.PersistentFlags(), name)
+}
+
+// MarkFlagRequired instructs the various shell completion implementations to
+// prioritize the named flag when performing completion,
+// and causes your command to report an error if invoked without the flag.
+func MarkFlagRequired(flags *pflag.FlagSet, name string) error {
+ return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"})
+}
+
+// MarkFlagFilename instructs the various shell completion implementations to
+// limit completions for the named flag to the specified file extensions.
+func (c *Command) MarkFlagFilename(name string, extensions ...string) error {
+ return MarkFlagFilename(c.Flags(), name, extensions...)
+}
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
+// The bash completion script will call the bash function f for the flag.
+//
+// This will only work for bash completion.
+// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows
+// to register a Go function which will work across all shells.
+func (c *Command) MarkFlagCustom(name string, f string) error {
+ return MarkFlagCustom(c.Flags(), name, f)
+}
+
+// MarkPersistentFlagFilename instructs the various shell completion
+// implementations to limit completions for the named persistent flag to the
+// specified file extensions.
+func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error {
+ return MarkFlagFilename(c.PersistentFlags(), name, extensions...)
+}
+
+// MarkFlagFilename instructs the various shell completion implementations to
+// limit completions for the named flag to the specified file extensions.
+func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error {
+ return flags.SetAnnotation(name, BashCompFilenameExt, extensions)
+}
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
+// The bash completion script will call the bash function f for the flag.
+//
+// This will only work for bash completion.
+// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows
+// to register a Go function which will work across all shells.
+func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error {
+ return flags.SetAnnotation(name, BashCompCustom, []string{f})
+}
+
+// MarkFlagDirname instructs the various shell completion implementations to
+// limit completions for the named flag to directory names.
+func (c *Command) MarkFlagDirname(name string) error {
+ return MarkFlagDirname(c.Flags(), name)
+}
+
+// MarkPersistentFlagDirname instructs the various shell completion
+// implementations to limit completions for the named persistent flag to
+// directory names.
+func (c *Command) MarkPersistentFlagDirname(name string) error {
+ return MarkFlagDirname(c.PersistentFlags(), name)
+}
+
+// MarkFlagDirname instructs the various shell completion implementations to
+// limit completions for the named flag to directory names.
+func MarkFlagDirname(flags *pflag.FlagSet, name string) error {
+ return flags.SetAnnotation(name, BashCompSubdirsInDir, []string{})
+}
diff --git a/vendor/github.com/spf13/cobra/shell_completions.md b/vendor/github.com/spf13/cobra/shell_completions.md
new file mode 100644
index 000000000..4ba06a11c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/shell_completions.md
@@ -0,0 +1,546 @@
+# Generating shell completions
+
+Cobra can generate shell completions for multiple shells.
+The currently supported shells are:
+- Bash
+- Zsh
+- fish
+- PowerShell
+
+Cobra will automatically provide your program with a fully functional `completion` command,
+similarly to how it provides the `help` command.
+
+## Creating your own completion command
+
+If you do not wish to use the default `completion` command, you can choose to
+provide your own, which will take precedence over the default one. (This also provides
+backwards-compatibility with programs that already have their own `completion` command.)
+
+If you are using the generator, you can create a completion command by running
+
+```bash
+cobra add completion
+```
+and then modifying the generated `cmd/completion.go` file to look something like this
+(writing the shell script to stdout allows the most flexible use):
+
+```go
+var completionCmd = &cobra.Command{
+ Use: "completion [bash|zsh|fish|powershell]",
+ Short: "Generate completion script",
+ Long: `To load completions:
+
+Bash:
+
+ $ source <(yourprogram completion bash)
+
+ # To load completions for each session, execute once:
+ # Linux:
+ $ yourprogram completion bash > /etc/bash_completion.d/yourprogram
+ # macOS:
+ $ yourprogram completion bash > /usr/local/etc/bash_completion.d/yourprogram
+
+Zsh:
+
+ # If shell completion is not already enabled in your environment,
+ # you will need to enable it. You can execute the following once:
+
+ $ echo "autoload -U compinit; compinit" >> ~/.zshrc
+
+ # To load completions for each session, execute once:
+ $ yourprogram completion zsh > "${fpath[1]}/_yourprogram"
+
+ # You will need to start a new shell for this setup to take effect.
+
+fish:
+
+ $ yourprogram completion fish | source
+
+ # To load completions for each session, execute once:
+ $ yourprogram completion fish > ~/.config/fish/completions/yourprogram.fish
+
+PowerShell:
+
+ PS> yourprogram completion powershell | Out-String | Invoke-Expression
+
+ # To load completions for every new session, run:
+ PS> yourprogram completion powershell > yourprogram.ps1
+ # and source this file from your PowerShell profile.
+`,
+ DisableFlagsInUseLine: true,
+ ValidArgs: []string{"bash", "zsh", "fish", "powershell"},
+ Args: cobra.ExactValidArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ switch args[0] {
+ case "bash":
+ cmd.Root().GenBashCompletion(os.Stdout)
+ case "zsh":
+ cmd.Root().GenZshCompletion(os.Stdout)
+ case "fish":
+ cmd.Root().GenFishCompletion(os.Stdout, true)
+ case "powershell":
+ cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout)
+ }
+ },
+}
+```
+
+**Note:** The cobra generator may include messages printed to stdout, for example when the config file is loaded; such messages will break the auto-completion script and must be removed.
+
+## Adapting the default completion command
+
+Cobra provides a few options for the default `completion` command. To configure such options you must set
+the `CompletionOptions` field on the *root* command.
+
+To tell Cobra *not* to provide the default `completion` command:
+```
+rootCmd.CompletionOptions.DisableDefaultCmd = true
+```
+
+To tell Cobra *not* to provide the user with the `--no-descriptions` flag to the completion sub-commands:
+```
+rootCmd.CompletionOptions.DisableNoDescFlag = true
+```
+
+To tell Cobra to completely disable descriptions for completions:
+```
+rootCmd.CompletionOptions.DisableDescriptions = true
+```
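+
+These options can be combined on the root command. As a minimal sketch (assuming a hypothetical `rootCmd`), keeping the default `completion` command but turning descriptions off entirely:
+
+```go
+rootCmd := &cobra.Command{Use: "yourprogram"}
+
+// Never emit completion descriptions, and hide the --no-descriptions
+// flag since it would have no effect.
+rootCmd.CompletionOptions.DisableDescriptions = true
+rootCmd.CompletionOptions.DisableNoDescFlag = true
+```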
+
+# Customizing completions
+
+The generated completion scripts will automatically handle completing commands and flags. However, you can make your completions much more powerful by providing information to complete your program's nouns and flag values.
+
+## Completion of nouns
+
+### Static completion of nouns
+
+Cobra allows you to provide a pre-defined list of completion choices for your nouns using the `ValidArgs` field.
+For example, if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them.
+Some simplified code from `kubectl get` looks like:
+
+```go
+validArgs := []string{"pod", "node", "service", "replicationcontroller"}
+
+cmd := &cobra.Command{
+ Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
+ Short: "Display one or many resources",
+ Long: get_long,
+ Example: get_example,
+ Run: func(cmd *cobra.Command, args []string) {
+ cobra.CheckErr(RunGet(f, out, cmd, args))
+ },
+ ValidArgs: validArgs,
+}
+```
+
+Notice we put the `ValidArgs` field on the `get` sub-command. Doing so will give results like:
+
+```bash
+$ kubectl get [tab][tab]
+node pod replicationcontroller service
+```
+
+#### Aliases for nouns
+
+If your nouns have aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
+
+```go
+argAliases := []string{"pods", "nodes", "services", "svc", "replicationcontrollers", "rc"}
+
+cmd := &cobra.Command{
+ ...
+ ValidArgs: validArgs,
+ ArgAliases: argAliases,
+}
+```
+
+The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by
+the completion algorithm if entered manually, e.g. in:
+
+```bash
+$ kubectl get rc [tab][tab]
+backend frontend database
+```
+
+Note that without declaring `rc` as an alias, the completion algorithm would not know to show the list of
+replication controllers following `rc`.
+
+### Dynamic completion of nouns
+
+In some cases it is not possible to provide a list of completions in advance. Instead, the list of completions must be determined at execution-time. In a similar fashion as for static completions, you can use the `ValidArgsFunction` field to provide a Go function that Cobra will execute when it needs the list of completion choices for the nouns of a command. Note that either `ValidArgs` or `ValidArgsFunction` can be used for a single cobra command, but not both.
+Simplified code from `helm status` looks like:
+
+```go
+cmd := &cobra.Command{
+ Use: "status RELEASE_NAME",
+ Short: "Display the status of the named release",
+ Long: status_long,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return RunGet(args[0])
+ },
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) != 0 {
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ }
+ return getReleasesFromCluster(toComplete), cobra.ShellCompDirectiveNoFileComp
+ },
+}
+```
+Where `getReleasesFromCluster()` is a Go function that obtains the list of current Helm releases running on the Kubernetes cluster.
+Notice we put the `ValidArgsFunction` on the `status` sub-command. Let's assume the Helm releases on the cluster are `harbor`, `notary`, `rook`, and `thanos`; this dynamic completion will then give results like:
+
+```bash
+$ helm status [tab][tab]
+harbor notary rook thanos
+```
+You may have noticed the use of `cobra.ShellCompDirective`. These directives are bit fields that let you control certain shell completion behaviors for your particular completion. You can combine them with the bit-or operator, for example `cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp`.
+```go
+// Indicates that the shell will perform its default behavior after completions
+// have been provided (this implies none of the other directives).
+ShellCompDirectiveDefault
+
+// Indicates an error occurred and completions should be ignored.
+ShellCompDirectiveError
+
+// Indicates that the shell should not add a space after the completion,
+// even if there is a single completion provided.
+ShellCompDirectiveNoSpace
+
+// Indicates that the shell should not provide file completion even when
+// no completion is provided.
+ShellCompDirectiveNoFileComp
+
+// Indicates that the returned completions should be used as file extension filters.
+// For example, to complete only files of the form *.json or *.yaml:
+// return []string{"yaml", "json"}, ShellCompDirectiveFilterFileExt
+// For flags, using MarkFlagFilename() and MarkPersistentFlagFilename()
+// is a shortcut to using this directive explicitly.
+//
+ShellCompDirectiveFilterFileExt
+
+// Indicates that only directory names should be provided in file completion.
+// For example:
+// return nil, ShellCompDirectiveFilterDirs
+// For flags, using MarkFlagDirname() is a shortcut to using this directive explicitly.
+//
+// To request directory names within another directory, the returned completions
+// should specify a single directory name within which to search. For example,
+// to complete directories within "themes/":
+// return []string{"themes"}, ShellCompDirectiveFilterDirs
+//
+ShellCompDirectiveFilterDirs
+```
+
+***Note***: When using the `ValidArgsFunction`, Cobra will call your registered function after having parsed all flags and arguments provided in the command-line. You therefore don't need to do this parsing yourself. For example, when a user calls `helm status --namespace my-rook-ns [tab][tab]`, Cobra will call your registered `ValidArgsFunction` after having parsed the `--namespace` flag, as it would have done when calling the `RunE` function.
+
+#### Debugging
+
+Cobra achieves dynamic completion through the use of a hidden command called by the completion script. To debug your Go completion code, you can call this hidden command directly:
+```bash
+$ helm __complete status har
+harbor
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
+```
+***Important:*** If the noun to complete is empty (when the user has not yet typed any letters of that noun), you must pass an empty parameter to the `__complete` command:
+```bash
+$ helm __complete status ""
+harbor
+notary
+rook
+thanos
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
+```
+Calling the `__complete` command directly allows you to run the Go debugger to troubleshoot your code. You can also add printouts to your code; Cobra provides the following functions to use for printouts in Go completion code:
+```go
+// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE
+// is set to a file path) and optionally prints to stderr.
+cobra.CompDebug(msg string, printToStdErr bool)
+cobra.CompDebugln(msg string, printToStdErr bool)
+
+// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE
+// is set to a file path) and to stderr.
+cobra.CompError(msg string)
+cobra.CompErrorln(msg string)
+```
+***Important:*** You should **not** leave traces that print directly to stdout in your completion code, as they will be interpreted as completion choices by the completion script. Instead, use the Cobra-provided debugging functions mentioned above.
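+
+As a minimal sketch (reusing the hypothetical `getReleasesFromCluster` helper from the earlier example), a debug trace can be added like so:
+
+```go
+ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ // Written to the file named by BASH_COMP_DEBUG_FILE (if set); never printed to stdout.
+ cobra.CompDebugln("completing release names for prefix: "+toComplete, false)
+ return getReleasesFromCluster(toComplete), cobra.ShellCompDirectiveNoFileComp
+},
+```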
+
+## Completions for flags
+
+### Mark flags as required
+
+Most of the time completions will only show sub-commands. But if a flag is required to make a sub-command work, you probably want it to show up when the user types [tab][tab]. You can mark a flag as 'Required' like so:
+
+```go
+cmd.MarkFlagRequired("pod")
+cmd.MarkFlagRequired("container")
+```
+
+and you'll get something like
+
+```bash
+$ kubectl exec [tab][tab]
+-c --container= -p --pod=
+```
+
+### Specify dynamic flag completion
+
+As for nouns, Cobra provides a way of defining dynamic completion of flags. To provide a Go function that Cobra will execute when it needs the list of completion choices for a flag, you must register the function using the `command.RegisterFlagCompletionFunc()` function.
+
+```go
+flagName := "output"
+cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return []string{"json", "table", "yaml"}, cobra.ShellCompDirectiveDefault
+})
+```
+Notice that calling `RegisterFlagCompletionFunc()` is done through the `command` with which the flag is associated. In our example this dynamic completion will give results like so:
+
+```bash
+$ helm status --output [tab][tab]
+json table yaml
+```
+
+#### Debugging
+
+You can also easily debug your Go completion code for flags:
+```bash
+$ helm __complete status --output ""
+json
+table
+yaml
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
+```
+***Important:*** You should **not** leave traces that print to stdout in your completion code, as they will be interpreted as completion choices by the completion script. Instead, use the Cobra-provided debugging functions mentioned further above.
+
+### Specify valid filename extensions for flags that take a filename
+
+To limit completions of flag values to file names with certain extensions you can either use the different `MarkFlagFilename()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterFileExt`, like so:
+```go
+flagName := "output"
+cmd.MarkFlagFilename(flagName, "yaml", "json")
+```
+or
+```go
+flagName := "output"
+cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return []string{"yaml", "json"}, ShellCompDirectiveFilterFileExt})
+```
+
+### Limit flag completions to directory names
+
+To limit completions of flag values to directory names you can either use the `MarkFlagDirname()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs`, like so:
+```go
+flagName := "output"
+cmd.MarkFlagDirname(flagName)
+```
+or
+```go
+flagName := "output"
+cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return nil, cobra.ShellCompDirectiveFilterDirs
+})
+```
+To limit completions of flag values to directory names *within another directory* you can use a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs` like so:
+```go
+flagName := "output"
+cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return []string{"themes"}, cobra.ShellCompDirectiveFilterDirs
+})
+```
+### Descriptions for completions
+
+Cobra provides support for completion descriptions. Such descriptions are supported for each shell
+(however, for bash, it is only available in the [completion V2 version](#bash-completion-v2)).
+For commands and flags, Cobra will provide the descriptions automatically, based on usage information.
+For example, using zsh:
+```
+$ helm s[tab]
+search -- search for a keyword in charts
+show -- show information of a chart
+status -- displays the status of the named release
+```
+while using fish:
+```
+$ helm s[tab]
+search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release)
+```
+
+Cobra allows you to add descriptions to your own completions. Simply add the description text after each completion, following a `\t` separator. This technique applies to completions returned by `ValidArgs`, `ValidArgsFunction` and `RegisterFlagCompletionFunc()`. For example:
+```go
+ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return []string{"harbor\tAn image registry", "thanos\tLong-term metrics"}, cobra.ShellCompDirectiveNoFileComp
+},
+```
+or
+```go
+ValidArgs: []string{"bash\tCompletions for bash", "zsh\tCompletions for zsh"}
+```
+## Bash completions
+
+### Dependencies
+
+The bash completion script generated by Cobra requires the `bash_completion` package. You should update the help text of your completion command to show how to install the `bash_completion` package ([Kubectl docs](https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion))
+
+### Aliases
+
+You can also configure `bash` aliases for your program and they will also support completions.
+
+```bash
+alias aliasname=origcommand
+complete -o default -F __start_origcommand aliasname
+
+# and now when you run `aliasname` completion will make
+# suggestions as it did for `origcommand`.
+
+$ aliasname
+completion firstcommand secondcommand
+```
+### Bash legacy dynamic completions
+
+For backward compatibility, Cobra still supports its bash legacy dynamic completion solution.
+Please refer to [Bash Completions](bash_completions.md) for details.
+
+### Bash completion V2
+
+Cobra provides two versions for bash completion. The original bash completion (which started it all!) can be used by calling
+`GenBashCompletion()` or `GenBashCompletionFile()`.
+
+A new V2 bash completion version is also available. This version can be used by calling `GenBashCompletionV2()` or
+`GenBashCompletionFileV2()`. The V2 version does **not** support the legacy dynamic completion
+(see [Bash Completions](bash_completions.md)) but instead works only with the Go dynamic completion
+solution described in this document.
+Unless your program already uses the legacy dynamic completion solution, it is recommended that you use the bash
+completion V2 solution which provides the following extra features:
+- Supports completion descriptions (like the other shells)
+- Small completion script of less than 300 lines (v1 generates scripts of thousands of lines; `kubectl` for example has a bash v1 completion script of over 13K lines)
+- Streamlined user experience thanks to a completion behavior aligned with the other shells
+
+`Bash` completion V2 supports descriptions for completions. When calling `GenBashCompletionV2()` or `GenBashCompletionFileV2()`
+you must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra
+will provide the description automatically based on usage information. You can choose to make this option configurable by
+your users.
+
+```
+# With descriptions
+$ helm s[tab][tab]
+search (search for a keyword in charts) status (display the status of the named release)
+show (show information of a chart)
+
+# Without descriptions
+$ helm s[tab][tab]
+search show status
+```
+**Note**: Cobra's default `completion` command uses bash completion V2. If for some reason you need to use bash completion V1, you will need to implement your own `completion` command.
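+
+A minimal sketch of generating the V2 script (assuming a hypothetical `rootCmd` and a user-chosen `withDescs` boolean):
+
+```go
+// Write the bash V2 completion script to stdout; the second argument
+// controls whether completion descriptions are included.
+if err := rootCmd.GenBashCompletionV2(os.Stdout, withDescs); err != nil {
+ log.Fatal(err)
+}
+```
+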
+## Zsh completions
+
+Cobra supports native zsh completion generated from the root `cobra.Command`.
+The generated completion script should be put somewhere in your `$fpath` and be named
+`_<yourProgram>`. You will need to start a new shell for the completions to become available.
+
+Zsh supports descriptions for completions. Cobra will provide the description automatically,
+based on usage information. Cobra provides a way to completely disable such descriptions by
+using `GenZshCompletionNoDesc()` or `GenZshCompletionFileNoDesc()`. You can choose to make
+this a configurable option to your users.
+```
+# With descriptions
+$ helm s[tab]
+search -- search for a keyword in charts
+show -- show information of a chart
+status -- displays the status of the named release
+
+# Without descriptions
+$ helm s[tab]
+search show status
+```
+*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`.
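+
+A minimal sketch of writing the script into a directory on `$fpath` (assuming a hypothetical `rootCmd`; the target path is illustrative):
+
+```go
+// The file name follows the _<yourProgram> convention mentioned above.
+if err := rootCmd.GenZshCompletionFile("/usr/local/share/zsh/site-functions/_yourprogram"); err != nil {
+ log.Fatal(err)
+}
+```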
+
+### Limitations
+
+* Custom completions implemented in Bash scripting (legacy) are not supported and will be ignored for `zsh` (including the use of the `BashCompCustom` flag annotation).
+ * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`).
+* The function `MarkFlagCustom()` is not supported and will be ignored for `zsh`.
+ * You should instead use `RegisterFlagCompletionFunc()`.
+
+### Zsh completions standardization
+
+Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backward-compatible, some small changes in behavior were introduced.
+Please refer to [Zsh Completions](zsh_completions.md) for details.
+
+## fish completions
+
+Cobra supports native fish completions generated from the root `cobra.Command`. You can use the `command.GenFishCompletion()` or `command.GenFishCompletionFile()` functions. You must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users.
+```
+# With descriptions
+$ helm s[tab]
+search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release)
+
+# Without descriptions
+$ helm s[tab]
+search show status
+```
+*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`.
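+
+A minimal sketch (assuming a hypothetical `rootCmd`) that writes the fish script to stdout, which the user can redirect into `~/.config/fish/completions/` as shown earlier:
+
+```go
+// The boolean controls whether completion descriptions are included.
+if err := rootCmd.GenFishCompletion(os.Stdout, true); err != nil {
+ log.Fatal(err)
+}
+```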
+
+### Limitations
+
+* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `fish` (including the use of the `BashCompCustom` flag annotation).
+ * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`).
+* The function `MarkFlagCustom()` is not supported and will be ignored for `fish`.
+ * You should instead use `RegisterFlagCompletionFunc()`.
+* The following flag completion annotations are not supported and will be ignored for `fish`:
+ * `BashCompFilenameExt` (filtering by file extension)
+ * `BashCompSubdirsInDir` (filtering by directory)
+* The functions corresponding to the above annotations are consequently not supported and will be ignored for `fish`:
+ * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension)
+ * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory)
+* Similarly, the following completion directives are not supported and will be ignored for `fish`:
+ * `ShellCompDirectiveFilterFileExt` (filtering by file extension)
+ * `ShellCompDirectiveFilterDirs` (filtering by directory)
+
+## PowerShell completions
+
+Cobra supports native PowerShell completions generated from the root `cobra.Command`. You can use the `command.GenPowerShellCompletion()` or `command.GenPowerShellCompletionFile()` functions. To include descriptions use `command.GenPowerShellCompletionWithDesc()` and `command.GenPowerShellCompletionFileWithDesc()`. Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users.
+
+The script is designed to support all three PowerShell completion modes:
+
+* TabCompleteNext (default windows style - on each key press the next option is displayed)
+* Complete (works like bash)
+* MenuComplete (works like zsh)
+
+You set the mode with `Set-PSReadLineKeyHandler -Key Tab -Function <mode>`. Descriptions are only displayed when using the `Complete` or `MenuComplete` mode.
+
+Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$Profile` environment variable. See `Get-Help about_Profiles` for more info about PowerShell profiles.
+
+```
+# With descriptions and Mode 'Complete'
+$ helm s[tab]
+search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release)
+
+# With descriptions and Mode 'MenuComplete'. The description of the currently selected value will be displayed below the suggestions.
+$ helm s[tab]
+search show status
+
+search for a keyword in charts
+
+# Without descriptions
+$ helm s[tab]
+search show status
+```
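+
+A minimal sketch (assuming a hypothetical `rootCmd`) that writes a completion script with descriptions to a file the user can then source from their PowerShell profile:
+
+```go
+// Descriptions are shown in the Complete and MenuComplete modes.
+if err := rootCmd.GenPowerShellCompletionFileWithDesc("yourprogram.ps1"); err != nil {
+ log.Fatal(err)
+}
+```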
+
+### Limitations
+
+* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `powershell` (including the use of the `BashCompCustom` flag annotation).
+ * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`).
+* The function `MarkFlagCustom()` is not supported and will be ignored for `powershell`.
+ * You should instead use `RegisterFlagCompletionFunc()`.
+* The following flag completion annotations are not supported and will be ignored for `powershell`:
+ * `BashCompFilenameExt` (filtering by file extension)
+ * `BashCompSubdirsInDir` (filtering by directory)
+* The functions corresponding to the above annotations are consequently not supported and will be ignored for `powershell`:
+ * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension)
+ * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory)
+* Similarly, the following completion directives are not supported and will be ignored for `powershell`:
+ * `ShellCompDirectiveFilterFileExt` (filtering by file extension)
+ * `ShellCompDirectiveFilterDirs` (filtering by directory)
diff --git a/vendor/github.com/spf13/cobra/user_guide.md b/vendor/github.com/spf13/cobra/user_guide.md
new file mode 100644
index 000000000..311abce28
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/user_guide.md
@@ -0,0 +1,637 @@
+# User Guide
+
+While you are welcome to provide your own organization, typically a Cobra-based
+application will follow this organizational structure:
+
+```
+ ▾ appName/
+ ▾ cmd/
+ add.go
+ your.go
+ commands.go
+ here.go
+ main.go
+```
+
+In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra.
+
+```go
+package main
+
+import (
+ "{pathToYourApp}/cmd"
+)
+
+func main() {
+ cmd.Execute()
+}
+```
+
+## Using the Cobra Generator
+
+Cobra provides its own program that will create your application and add any
+commands you want. It's the easiest way to incorporate Cobra into your application.
+
+[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it.
+
+## Using the Cobra Library
+
+To manually implement Cobra you need to create a bare main.go file and a rootCmd file.
+You will optionally provide additional commands as you see fit.
+
+### Create rootCmd
+
+Cobra doesn't require any special constructors. Simply create your commands.
+
+Ideally you place this in app/cmd/root.go:
+
+```go
+var rootCmd = &cobra.Command{
+ Use: "hugo",
+ Short: "Hugo is a very fast static site generator",
+ Long: `A Fast and Flexible Static Site Generator built with
+ love by spf13 and friends in Go.
+ Complete documentation is available at http://hugo.spf13.com`,
+ Run: func(cmd *cobra.Command, args []string) {
+ // Do Stuff Here
+ },
+}
+
+func Execute() {
+ if err := rootCmd.Execute(); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+}
+```
+
+You will additionally define flags and handle configuration in your init() function.
+
+For example cmd/root.go:
+
+```go
+package cmd
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+)
+
+var (
+ // Used for flags.
+ cfgFile string
+ userLicense string
+
+ rootCmd = &cobra.Command{
+ Use: "cobra",
+ Short: "A generator for Cobra based Applications",
+ Long: `Cobra is a CLI library for Go that empowers applications.
+This application is a tool to generate the needed files
+to quickly create a Cobra application.`,
+ }
+)
+
+// Execute executes the root command.
+func Execute() error {
+ return rootCmd.Execute()
+}
+
+func init() {
+ cobra.OnInitialize(initConfig)
+
+ rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
+ rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution")
+ rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project")
+ rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration")
+ viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
+ viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper"))
+ viper.SetDefault("author", "NAME HERE ")
+ viper.SetDefault("license", "apache")
+
+ rootCmd.AddCommand(addCmd)
+ rootCmd.AddCommand(initCmd)
+}
+
+func initConfig() {
+ if cfgFile != "" {
+ // Use config file from the flag.
+ viper.SetConfigFile(cfgFile)
+ } else {
+ // Find home directory.
+ home, err := os.UserHomeDir()
+ cobra.CheckErr(err)
+
+ // Search config in home directory with name ".cobra" (without extension).
+ viper.AddConfigPath(home)
+ viper.SetConfigType("yaml")
+ viper.SetConfigName(".cobra")
+ }
+
+ viper.AutomaticEnv()
+
+ if err := viper.ReadInConfig(); err == nil {
+ fmt.Println("Using config file:", viper.ConfigFileUsed())
+ }
+}
+```
+
+### Create your main.go
+
+With the root command you need to have your main function execute it.
+Execute should be run on the root for clarity, though it can be called on any command.
+
+In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra.
+
+```go
+package main
+
+import (
+ "{pathToYourApp}/cmd"
+)
+
+func main() {
+ cmd.Execute()
+}
+```
+
+### Create additional commands
+
+Additional commands can be defined and typically are each given their own file
+inside of the cmd/ directory.
+
+If you wanted to create a version command you would create cmd/version.go and
+populate it with the following:
+
+```go
+package cmd
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+)
+
+func init() {
+ rootCmd.AddCommand(versionCmd)
+}
+
+var versionCmd = &cobra.Command{
+ Use: "version",
+ Short: "Print the version number of Hugo",
+ Long: `All software has versions. This is Hugo's`,
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Hugo Static Site Generator v0.9 -- HEAD")
+ },
+}
+```
+
+### Returning and handling errors
+
+If you wish to return an error to the caller of a command, `RunE` can be used.
+
+```go
+package cmd
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+)
+
+func init() {
+ rootCmd.AddCommand(tryCmd)
+}
+
+var tryCmd = &cobra.Command{
+ Use: "try",
+ Short: "Try and possibly fail at something",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if err := someFunc(); err != nil {
+ return err
+ }
+ return nil
+ },
+}
+```
+
+The error can then be caught at the execute function call.
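+
+A minimal sketch of handling that error in main, using the `Execute` wrapper shown earlier:
+
+```go
+func main() {
+ if err := cmd.Execute(); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+}
+```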
+
+## Working with Flags
+
+Flags provide modifiers to control how the action command operates.
+
+### Assign flags to a command
+
+Since flags are defined and used in different locations, we need to define a
+variable outside the command, with the correct scope, to which the flag can
+be assigned.
+
+```go
+var Verbose bool
+var Source string
+```
+
+There are two different approaches to assign a flag.
+
+### Persistent Flags
+
+A flag can be 'persistent', meaning that this flag will be available to the
+command it's assigned to as well as every command under that command. For
+global flags, assign a flag as a persistent flag on the root.
+
+```go
+rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output")
+```
+
+### Local Flags
+
+A flag can also be assigned locally, which will only apply to that specific command.
+
+```go
+localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from")
+```
+
+### Local Flag on Parent Commands
+
+By default, Cobra only parses local flags on the target command, and any local flags on
+parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will
+parse local flags on each command before executing the target command.
+
+```go
+command := cobra.Command{
+ Use: "print [OPTIONS] [COMMANDS]",
+ TraverseChildren: true,
+}
+```
+
+### Bind Flags with Config
+
+You can also bind your flags with [viper](https://github.com/spf13/viper):
+```go
+var author string
+
+func init() {
+ rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution")
+ viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
+}
+```
+
+In this example, the persistent flag `author` is bound with `viper`.
+**Note**: the variable `author` will not be set to the value from the config file
+when the `--author` flag is not provided by the user.
+
+More in [viper documentation](https://github.com/spf13/viper#working-with-flags).
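+
+Because the bound variable is not updated from the config file, the effective value should be read back through viper. A minimal sketch inside a command's `Run`:
+
+```go
+Run: func(cmd *cobra.Command, args []string) {
+ // Returns the --author flag value if given, otherwise the value from
+ // the config file, otherwise the default set with viper.SetDefault.
+ fmt.Println("Author:", viper.GetString("author"))
+},
+```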
+
+### Required flags
+
+Flags are optional by default. If instead you wish your command to report an error
+when a flag has not been set, mark it as required:
+```go
+rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)")
+rootCmd.MarkFlagRequired("region")
+```
+
+Or, for persistent flags:
+```go
+rootCmd.PersistentFlags().StringVarP(&Region, "region", "r", "", "AWS region (required)")
+rootCmd.MarkPersistentFlagRequired("region")
+```
+
+## Positional and Custom Arguments
+
+Validation of positional arguments can be specified using the `Args` field
+of `Command`.
+
+The following validators are built in:
+
+- `NoArgs` - the command will report an error if there are any positional args.
+- `ArbitraryArgs` - the command will accept any args.
+- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`.
+- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args.
+- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args.
+- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args.
+- `ExactValidArgs(int)` - the command will report an error if there are not exactly N positional args OR if there are any positional args that are not in the `ValidArgs` field of `Command`.
+- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args.
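+
+A minimal sketch using one of the built-in validators (the `echoCmd` command is hypothetical):
+
+```go
+var echoCmd = &cobra.Command{
+ Use: "echo [string]",
+ Args: cobra.ExactArgs(1), // report an error unless exactly one positional arg is given
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println(args[0])
+ },
+}
+```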
+
+An example of setting the custom validator:
+
+```go
+var cmd = &cobra.Command{
+ Short: "hello",
+ Args: func(cmd *cobra.Command, args []string) error {
+ if len(args) < 1 {
+ return errors.New("requires a color argument")
+ }
+ if myapp.IsValidColor(args[0]) {
+ return nil
+ }
+ return fmt.Errorf("invalid color specified: %s", args[0])
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Hello, World!")
+ },
+}
+```
+
+## Example
+
+In the example below, we have defined three commands. Two are at the top level
+and one (cmdTimes) is a child of one of the top commands. In this case the root
+is not executable, meaning that a subcommand is required. This is accomplished
+by not providing a 'Run' for the 'rootCmd'.
+
+We have only defined one flag for a single command.
+
+More documentation about flags is available at https://github.com/spf13/pflag
+
+```go
+package main
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+)
+
+func main() {
+ var echoTimes int
+
+ var cmdPrint = &cobra.Command{
+ Use: "print [string to print]",
+ Short: "Print anything to the screen",
+ Long: `print is for printing anything back to the screen.
+For many years people have printed back to the screen.`,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Print: " + strings.Join(args, " "))
+ },
+ }
+
+ var cmdEcho = &cobra.Command{
+ Use: "echo [string to echo]",
+ Short: "Echo anything to the screen",
+ Long: `echo is for echoing anything back.
+Echo works a lot like print, except it has a child command.`,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Echo: " + strings.Join(args, " "))
+ },
+ }
+
+ var cmdTimes = &cobra.Command{
+ Use: "times [string to echo]",
+ Short: "Echo anything to the screen more times",
+ Long: `echo things multiple times back to the user by providing
+a count and a string.`,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ for i := 0; i < echoTimes; i++ {
+ fmt.Println("Echo: " + strings.Join(args, " "))
+ }
+ },
+ }
+
+ cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input")
+
+ var rootCmd = &cobra.Command{Use: "app"}
+ rootCmd.AddCommand(cmdPrint, cmdEcho)
+ cmdEcho.AddCommand(cmdTimes)
+ rootCmd.Execute()
+}
+```
+
+For a more complete example of a larger application, please check out [Hugo](http://gohugo.io/).
+
+## Help Command
+
+Cobra automatically adds a help command to your application when you have subcommands.
+This will be called when a user runs 'app help'. Additionally, help will also
+support all other commands as input. Say, for instance, you have a command called
+'create' without any additional configuration; Cobra will work when 'app help
+create' is called. Every command will automatically have the '--help' flag added.
+
+### Example
+
+The following output is automatically generated by Cobra. Nothing beyond the
+command and flag definitions is needed.
+
+ $ cobra help
+
+ Cobra is a CLI library for Go that empowers applications.
+ This application is a tool to generate the needed files
+ to quickly create a Cobra application.
+
+ Usage:
+ cobra [command]
+
+ Available Commands:
+ add Add a command to a Cobra Application
+ help Help about any command
+ init Initialize a Cobra Application
+
+ Flags:
+ -a, --author string author name for copyright attribution (default "YOUR NAME")
+ --config string config file (default is $HOME/.cobra.yaml)
+ -h, --help help for cobra
+ -l, --license string name of license for the project
+ --viper use Viper for configuration (default true)
+
+ Use "cobra [command] --help" for more information about a command.
+
+
+Help is just a command like any other. There is no special logic or behavior
+around it. In fact, you can provide your own if you want.
+
+### Defining your own help
+
+You can provide your own Help command or your own template for the default command to use
+with following functions:
+
+```go
+cmd.SetHelpCommand(cmd *Command)
+cmd.SetHelpFunc(f func(*Command, []string))
+cmd.SetHelpTemplate(s string)
+```
+
+The latter two will also apply to any child commands.
+
+## Usage Message
+
+When the user provides an invalid flag or invalid command, Cobra responds by
+showing the user the 'usage'.
+
+### Example
+You may recognize this from the help above. That's because the default help
+embeds the usage as part of its output.
+
+ $ cobra --invalid
+ Error: unknown flag: --invalid
+ Usage:
+ cobra [command]
+
+ Available Commands:
+ add Add a command to a Cobra Application
+ help Help about any command
+ init Initialize a Cobra Application
+
+ Flags:
+ -a, --author string author name for copyright attribution (default "YOUR NAME")
+ --config string config file (default is $HOME/.cobra.yaml)
+ -h, --help help for cobra
+ -l, --license string name of license for the project
+ --viper use Viper for configuration (default true)
+
+ Use "cobra [command] --help" for more information about a command.
+
+### Defining your own usage
+You can provide your own usage function or template for Cobra to use.
+Like help, the function and template are overridable through public methods:
+
+```go
+cmd.SetUsageFunc(f func(*Command) error)
+cmd.SetUsageTemplate(s string)
+```
+
+## Version Flag
+
+Cobra adds a top-level '--version' flag if the Version field is set on the root command.
+Running an application with the '--version' flag will print the version to stdout using
+the version template. The template can be customized using the
+`cmd.SetVersionTemplate(s string)` function.
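+
+A minimal sketch (assuming a hypothetical `rootCmd`):
+
+```go
+rootCmd := &cobra.Command{
+ Use: "app",
+ Version: "1.2.3",
+}
+// Optional: customize the output of `app --version`.
+rootCmd.SetVersionTemplate("app version {{.Version}}\n")
+```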
+
+## PreRun and PostRun Hooks
+
+It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order:
+
+- `PersistentPreRun`
+- `PreRun`
+- `Run`
+- `PostRun`
+- `PersistentPostRun`
+
+An example of two commands which use all of these features is below. When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+)
+
+func main() {
+
+ var rootCmd = &cobra.Command{
+ Use: "root [sub]",
+ Short: "My root command",
+ PersistentPreRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args)
+ },
+ PreRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PreRun with args: %v\n", args)
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd Run with args: %v\n", args)
+ },
+ PostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PostRun with args: %v\n", args)
+ },
+ PersistentPostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args)
+ },
+ }
+
+ var subCmd = &cobra.Command{
+ Use: "sub [no options!]",
+ Short: "My subcommand",
+ PreRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd PreRun with args: %v\n", args)
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd Run with args: %v\n", args)
+ },
+ PostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd PostRun with args: %v\n", args)
+ },
+ PersistentPostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args)
+ },
+ }
+
+ rootCmd.AddCommand(subCmd)
+
+ rootCmd.SetArgs([]string{""})
+ rootCmd.Execute()
+ fmt.Println()
+ rootCmd.SetArgs([]string{"sub", "arg1", "arg2"})
+ rootCmd.Execute()
+}
+```
+
+Output:
+```
+Inside rootCmd PersistentPreRun with args: []
+Inside rootCmd PreRun with args: []
+Inside rootCmd Run with args: []
+Inside rootCmd PostRun with args: []
+Inside rootCmd PersistentPostRun with args: []
+
+Inside rootCmd PersistentPreRun with args: [arg1 arg2]
+Inside subCmd PreRun with args: [arg1 arg2]
+Inside subCmd Run with args: [arg1 arg2]
+Inside subCmd PostRun with args: [arg1 arg2]
+Inside subCmd PersistentPostRun with args: [arg1 arg2]
+```
+
+## Suggestions when "unknown command" happens
+
+Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example:
+
+```
+$ hugo srever
+Error: unknown command "srever" for "hugo"
+
+Did you mean this?
+ server
+
+Run 'hugo --help' for usage.
+```
+
+Suggestions are generated automatically based on every registered subcommand, using an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion.
+
+If you need to disable suggestions or tweak the string distance in your command, use:
+
+```go
+command.DisableSuggestions = true
+```
+
+or
+
+```go
+command.SuggestionsMinimumDistance = 1
+```
+
+You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but which make sense in your set of commands and for which you don't want aliases. Example:
+
+```
+$ kubectl remove
+Error: unknown command "remove" for "kubectl"
+
+Did you mean this?
+ delete
+
+Run 'kubectl help' for usage.
+```
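+
+A minimal sketch of how such a suggestion can be configured, using a hypothetical `deleteCmd`:
+
+```go
+var deleteCmd = &cobra.Command{
+ Use: "delete",
+ Short: "Delete a resource",
+ // Suggest `delete` when the user types these non-similar strings.
+ SuggestFor: []string{"remove", "rm"},
+ RunE: runDelete, // runDelete is a hypothetical handler
+}
+```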
+
+## Generating documentation for your command
+
+Cobra can generate documentation based on subcommands, flags, etc. Read more about it in the [docs generation documentation](doc/README.md).
+
+## Generating shell completions
+
+Cobra can generate a shell-completion file for the following shells: bash, zsh, fish, PowerShell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md).
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go
new file mode 100644
index 000000000..1afec30ea
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/zsh_completions.go
@@ -0,0 +1,258 @@
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+// GenZshCompletionFile generates zsh completion file including descriptions.
+func (c *Command) GenZshCompletionFile(filename string) error {
+ return c.genZshCompletionFile(filename, true)
+}
+
+// GenZshCompletion generates zsh completion file including descriptions
+// and writes it to the passed writer.
+func (c *Command) GenZshCompletion(w io.Writer) error {
+ return c.genZshCompletion(w, true)
+}
+
+// GenZshCompletionFileNoDesc generates zsh completion file without descriptions.
+func (c *Command) GenZshCompletionFileNoDesc(filename string) error {
+ return c.genZshCompletionFile(filename, false)
+}
+
+// GenZshCompletionNoDesc generates zsh completion file without descriptions
+// and writes it to the passed writer.
+func (c *Command) GenZshCompletionNoDesc(w io.Writer) error {
+ return c.genZshCompletion(w, false)
+}
+
+// MarkZshCompPositionalArgumentFile only worked for zsh and its behavior was
+// not consistent with Bash completion. It has therefore been disabled.
+// Instead, when no other completion is specified, file completion is done by
+// default for every argument. One can disable file completion on a per-argument
+// basis by using ValidArgsFunction and ShellCompDirectiveNoFileComp.
+// To achieve file extension filtering, one can use ValidArgsFunction and
+// ShellCompDirectiveFilterFileExt.
+//
+// Deprecated
+func (c *Command) MarkZshCompPositionalArgumentFile(argPosition int, patterns ...string) error {
+ return nil
+}
+
+// MarkZshCompPositionalArgumentWords only worked for zsh. It has therefore
+// been disabled.
+// To achieve the same behavior across all shells, one can use
+// ValidArgs (for the first argument only) or ValidArgsFunction for
+// any argument (can include the first one also).
+//
+// Deprecated
+func (c *Command) MarkZshCompPositionalArgumentWords(argPosition int, words ...string) error {
+ return nil
+}
+
+func (c *Command) genZshCompletionFile(filename string, includeDesc bool) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.genZshCompletion(outFile, includeDesc)
+}
+
+func (c *Command) genZshCompletion(w io.Writer, includeDesc bool) error {
+ buf := new(bytes.Buffer)
+ genZshComp(buf, c.Name(), includeDesc)
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func genZshComp(buf io.StringWriter, name string, includeDesc bool) {
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(`#compdef _%[1]s %[1]s
+
+# zsh completion for %-36[1]s -*- shell-script -*-
+
+__%[1]s_debug()
+{
+ local file="$BASH_COMP_DEBUG_FILE"
+ if [[ -n ${file} ]]; then
+ echo "$*" >> "${file}"
+ fi
+}
+
+_%[1]s()
+{
+ local shellCompDirectiveError=%[3]d
+ local shellCompDirectiveNoSpace=%[4]d
+ local shellCompDirectiveNoFileComp=%[5]d
+ local shellCompDirectiveFilterFileExt=%[6]d
+ local shellCompDirectiveFilterDirs=%[7]d
+
+ local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace
+ local -a completions
+
+ __%[1]s_debug "\n========= starting completion logic =========="
+ __%[1]s_debug "CURRENT: ${CURRENT}, words[*]: ${words[*]}"
+
+ # The user could have moved the cursor backwards on the command-line.
+ # We need to trigger completion from the $CURRENT location, so we need
+ # to truncate the command-line ($words) up to the $CURRENT location.
+ # (We cannot use $CURSOR as its value does not work when a command is an alias.)
+ words=("${=words[1,CURRENT]}")
+ __%[1]s_debug "Truncated words[*]: ${words[*]},"
+
+ lastParam=${words[-1]}
+ lastChar=${lastParam[-1]}
+ __%[1]s_debug "lastParam: ${lastParam}, lastChar: ${lastChar}"
+
+ # For zsh, when completing a flag with an = (e.g., %[1]s -n=)
+ # completions must be prefixed with the flag
+ setopt local_options BASH_REMATCH
+ if [[ "${lastParam}" =~ '-.*=' ]]; then
+ # We are dealing with a flag with an =
+ flagPrefix="-P ${BASH_REMATCH}"
+ fi
+
+ # Prepare the command to obtain completions
+ requestComp="${words[1]} %[2]s ${words[2,-1]}"
+ if [ "${lastChar}" = "" ]; then
+ # If the last parameter is complete (there is a space following it)
+ # We add an extra empty parameter so we can indicate this to the go completion code.
+ __%[1]s_debug "Adding extra empty parameter"
+ requestComp="${requestComp} \"\""
+ fi
+
+ __%[1]s_debug "About to call: eval ${requestComp}"
+
+ # Use eval to handle any environment variables and such
+ out=$(eval ${requestComp} 2>/dev/null)
+ __%[1]s_debug "completion output: ${out}"
+
+ # Extract the directive integer following a : from the last line
+ local lastLine
+ while IFS='\n' read -r line; do
+ lastLine=${line}
+ done < <(printf "%%s\n" "${out[@]}")
+ __%[1]s_debug "last line: ${lastLine}"
+
+ if [ "${lastLine[1]}" = : ]; then
+ directive=${lastLine[2,-1]}
+ # Remove the directive including the : and the newline
+ local suffix
+ (( suffix=${#lastLine}+2))
+ out=${out[1,-$suffix]}
+ else
+ # There is no directive specified. Leave $out as is.
+ __%[1]s_debug "No directive found. Setting do default"
+ directive=0
+ fi
+
+ __%[1]s_debug "directive: ${directive}"
+ __%[1]s_debug "completions: ${out}"
+ __%[1]s_debug "flagPrefix: ${flagPrefix}"
+
+ if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
+ __%[1]s_debug "Completion received error. Ignoring completions."
+ return
+ fi
+
+ while IFS='\n' read -r comp; do
+ if [ -n "$comp" ]; then
+ # If requested, completions are returned with a description.
+ # The description is preceded by a TAB character.
+ # For zsh's _describe, we need to use a : instead of a TAB.
+ # We first need to escape any : as part of the completion itself.
+ comp=${comp//:/\\:}
+
+ local tab=$(printf '\t')
+ comp=${comp//$tab/:}
+
+ __%[1]s_debug "Adding completion: ${comp}"
+ completions+=${comp}
+ lastComp=$comp
+ fi
+ done < <(printf "%%s\n" "${out[@]}")
+
+ if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
+ __%[1]s_debug "Activating nospace."
+ noSpace="-S ''"
+ fi
+
+ if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
+ # File extension filtering
+ local filteringCmd
+ filteringCmd='_files'
+ for filter in ${completions[@]}; do
+ if [ ${filter[1]} != '*' ]; then
+ # zsh requires a glob pattern to do file filtering
+ filter="\*.$filter"
+ fi
+ filteringCmd+=" -g $filter"
+ done
+ filteringCmd+=" ${flagPrefix}"
+
+ __%[1]s_debug "File filtering command: $filteringCmd"
+ _arguments '*:filename:'"$filteringCmd"
+ elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
+ # File completion for directories only
+ local subdir
+ subdir="${completions[1]}"
+ if [ -n "$subdir" ]; then
+ __%[1]s_debug "Listing directories in $subdir"
+ pushd "${subdir}" >/dev/null 2>&1
+ else
+ __%[1]s_debug "Listing directories in ."
+ fi
+
+ local result
+ _arguments '*:dirname:_files -/'" ${flagPrefix}"
+ result=$?
+ if [ -n "$subdir" ]; then
+ popd >/dev/null 2>&1
+ fi
+ return $result
+ else
+ __%[1]s_debug "Calling _describe"
+ if eval _describe "completions" completions $flagPrefix $noSpace; then
+ __%[1]s_debug "_describe found some completions"
+
+ # Return the success of having called _describe
+ return 0
+ else
+ __%[1]s_debug "_describe did not find completions."
+ __%[1]s_debug "Checking if we should do file completion."
+ if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
+ __%[1]s_debug "deactivating file completion"
+
+ # We must return an error code here to let zsh know that there were no
+ # completions found by _describe; this is what will trigger other
+ # matching algorithms to attempt to find completions.
+ # For example zsh can match letters in the middle of words.
+ return 1
+ else
+ # Perform file completion
+ __%[1]s_debug "Activating file completion"
+
+ # We must return the result of this command, so it must be the
+ # last command, or else we must store its result to return it.
+ _arguments '*:filename:_files'" ${flagPrefix}"
+ fi
+ fi
+ fi
+}
+
+# don't run the completion function when being source-ed or eval-ed
+if [ "$funcstack[1]" = "_%[1]s" ]; then
+ _%[1]s
+fi
+`, name, compCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+}
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.md b/vendor/github.com/spf13/cobra/zsh_completions.md
new file mode 100644
index 000000000..7cff61787
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/zsh_completions.md
@@ -0,0 +1,48 @@
+## Generating Zsh Completion For Your cobra.Command
+
+Please refer to [Shell Completions](shell_completions.md) for details.
+
+## Zsh completions standardization
+
+Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backwards-compatible, some small changes in behavior were introduced.
+
+### Deprecation summary
+
+See further below for more details on these deprecations.
+
+* `cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` is no longer needed. It is therefore **deprecated** and silently ignored.
+* `cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` is **deprecated** and silently ignored.
+ * Instead use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt`.
+* `cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored.
+ * Instead use `ValidArgsFunction`.
+
+### Behavioral changes
+
+**Noun completion**
+|Old behavior|New behavior|
+|---|---|
+|No file completion by default (opposite of bash)|File completion by default; use `ValidArgsFunction` with `ShellCompDirectiveNoFileComp` to turn off file completion on a per-argument basis|
+|Completion of flag names without the `-` prefix having been typed|Flag names are only completed if the user has typed the first `-`|
+|`cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` used to turn on file completion on a per-argument position basis|File completion for all arguments by default; `cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored|
+|`cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` used to turn on file completion **with glob filtering** on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored; use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt` for file **extension** filtering (not full glob filtering)|
+|`cmd.MarkZshCompPositionalArgumentWords(pos, words[])` used to provide completion choices on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored; use `ValidArgsFunction` to achieve the same behavior|
+
+**Flag-value completion**
+
+|Old behavior|New behavior|
+|---|---|
+|No file completion by default (opposite of bash)|File completion by default; use `RegisterFlagCompletionFunc()` with `ShellCompDirectiveNoFileComp` to turn off file completion|
+|`cmd.MarkFlagFilename(flag, []string{})` and similar used to turn on file completion|File completion by default; `cmd.MarkFlagFilename(flag, []string{})` no longer needed in this context and silently ignored|
+|`cmd.MarkFlagFilename(flag, glob[])` used to turn on file completion **with glob filtering** (syntax of `[]string{"*.yaml", "*.yml"}` incompatible with bash)|Will continue to work, however, support for bash syntax is added and should be used instead so as to work for all shells (`[]string{"yaml", "yml"}`)|
+|`cmd.MarkFlagDirname(flag)` only completes directories (zsh-specific)|Has been added for all shells|
+|Completion of a flag name does not repeat, unless flag is of type `*Array` or `*Slice` (not supported by bash)|Retained for `zsh` and added to `fish`|
+|Completion of a flag name does not provide the `=` form (unlike bash)|Retained for `zsh` and added to `fish`|
+
+**Improvements**
+
+* Custom completion support (`ValidArgsFunction` and `RegisterFlagCompletionFunc()`)
+* File completion by default if no other completions found
+* Handling of required flags
+* File extension filtering no longer mutually exclusive with bash usage
+* Completion of directory names *within* another directory
+* Support for `=` form of flags
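+
+As an illustrative sketch of the replacement APIs referred to above (this example is not part of the upstream docs; the `view` command and `--format` flag are made up for illustration):
+
+```go
+package main
+
+import "github.com/spf13/cobra"
+
+func main() {
+	root := &cobra.Command{
+		Use: "view [file]",
+		// Complete positional arguments with *.yaml / *.yml files only.
+		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+			return []string{"yaml", "yml"}, cobra.ShellCompDirectiveFilterFileExt
+		},
+		Run: func(cmd *cobra.Command, args []string) {},
+	}
+
+	// Complete the made-up --format flag from a fixed set of values and
+	// turn off file completion for it.
+	root.Flags().String("format", "plain", "output format")
+	_ = root.RegisterFlagCompletionFunc("format", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+		return []string{"plain", "json"}, cobra.ShellCompDirectiveNoFileComp
+	})
+
+	_ = root.Execute()
+}
+```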
diff --git a/vendor/github.com/spf13/jwalterweatherman/.gitignore b/vendor/github.com/spf13/jwalterweatherman/.gitignore
new file mode 100644
index 000000000..a71f88af8
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.bench
+go.sum
\ No newline at end of file
diff --git a/vendor/github.com/urfave/cli/v2/LICENSE b/vendor/github.com/spf13/jwalterweatherman/LICENSE
similarity index 93%
rename from vendor/github.com/urfave/cli/v2/LICENSE
rename to vendor/github.com/spf13/jwalterweatherman/LICENSE
index 42a597e29..4527efb9c 100644
--- a/vendor/github.com/urfave/cli/v2/LICENSE
+++ b/vendor/github.com/spf13/jwalterweatherman/LICENSE
@@ -1,6 +1,6 @@
-MIT License
+The MIT License (MIT)
-Copyright (c) 2016 Jeremy Saenz & Contributors
+Copyright (c) 2014 Steve Francia
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -18,4 +18,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/spf13/jwalterweatherman/README.md b/vendor/github.com/spf13/jwalterweatherman/README.md
new file mode 100644
index 000000000..932a23fc6
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/README.md
@@ -0,0 +1,148 @@
+jWalterWeatherman
+=================
+
+Seamless printing to the terminal (stdout) and logging to an io.Writer
+(file) that’s as easy to use as fmt.Println.
+
+![and_that__s_why_you_always_leave_a_note_by_jonnyetc-d57q7um](https://cloud.githubusercontent.com/assets/173412/11002937/ccd01654-847d-11e5-828e-12ebaf582eaf.jpg)
+Graphic by [JonnyEtc](http://jonnyetc.deviantart.com/art/And-That-s-Why-You-Always-Leave-a-Note-315311422)
+
+JWW is primarily a wrapper around the excellent standard log library. It
+provides a few advantages over using the standard log library alone.
+
+1. Ready to go out of the box.
+2. One library for both printing to the terminal and logging (to files).
+3. Really easy to log to either a temp file or a file you specify.
+
+
+I really wanted a very straightforward library that could seamlessly do
+the following things.
+
+1. Replace all the println, printf, etc. statements throughout my code with
+ something more useful
+2. Allow the user to easily control what levels are printed to stdout
+3. Allow the user to easily control what levels are logged
+4. Provide an easy mechanism (like fmt.Println) to print info to the user
+ which can be easily logged as well
+5. Due to 2 & 3, provide an easy verbose mode for output and logs
+6. Not have any unnecessary initialization cruft. Just use it.
+
+# Usage
+
+## Step 1. Use it
+Put calls throughout your source based on the type of feedback.
+No initialization or setup needs to happen. Just start calling things.
+
+Available Loggers are:
+
+ * TRACE
+ * DEBUG
+ * INFO
+ * WARN
+ * ERROR
+ * CRITICAL
+ * FATAL
+
+Each of these is a logger based on the standard log library and follows the
+standard usage, e.g.:
+
+```go
+ import (
+ jww "github.com/spf13/jwalterweatherman"
+ )
+
+ ...
+
+ if err != nil {
+
+ // This is a pretty serious error and the user should know about
+ // it. It will be printed to the terminal as well as logged under the
+ // default thresholds.
+
+ jww.ERROR.Println(err)
+ }
+
+ if err2 != nil {
+ // This error isn’t going to materially change the behavior of the
+ // application, but it’s something that may not be what the user
+ // expects. Under the default thresholds, Warn will be logged, but
+ // not printed to the terminal.
+
+ jww.WARN.Println(err2)
+ }
+
+ // Information that’s relevant to what’s happening, but not very
+ // important for the user. Under the default thresholds this will be
+ // discarded.
+
+ jww.INFO.Printf("information %q", response)
+
+```
+
+NOTE: You can also use the library in a non-global setting by creating an instance of a Notepad:
+
+```go
+notepad := jww.NewNotepad(jww.LevelInfo, jww.LevelTrace, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime)
+notepad.WARN.Println("Some warning")
+```
+
+_Why 7 levels?_
+
+Maybe you think that 7 levels are too many for any application... and you
+are probably correct. Just because there are seven levels doesn’t mean
+that you should be using all 7 levels. Pick the right set for your needs.
+Remember they only have to mean something to your project.
+
+## Step 2. Optionally configure JWW
+
+Under the default thresholds:
+
+ * Debug, Trace & Info go to /dev/null
+ * Warn and above are logged (when a log file/io.Writer is provided)
+ * Error and above are printed to the terminal (stdout)
+
+### Changing the thresholds
+
+The threshold can be changed at any time, but will only affect calls that
+execute after the change was made.
+
+This is very useful if your application has a verbose mode. Of course you
+can decide what verbose means to you or even have multiple levels of
+verbosity.
+
+
+```go
+ import (
+ jww "github.com/spf13/jwalterweatherman"
+ )
+
+ if Verbose {
+ jww.SetLogThreshold(jww.LevelTrace)
+ jww.SetStdoutThreshold(jww.LevelInfo)
+ }
+```
+
+Note that JWW's own internal output uses log levels as well, so set the log
+level before making any other calls if you want to see what it's up to.
+
+
+### Setting a log file
+
+JWW can log to any `io.Writer`:
+
+
+```go
+
+ jww.SetLogOutput(customWriter)
+
+```
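+
+For instance, a minimal sketch that sends INFO and above to a log file (the `jww.log` path and the thresholds here are just example assumptions):
+
+```go
+package main
+
+import (
+	"log"
+	"os"
+
+	jww "github.com/spf13/jwalterweatherman"
+)
+
+func main() {
+	// Assumes ./jww.log is writable; the path is only an example.
+	logFile, err := os.OpenFile("jww.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer logFile.Close()
+
+	jww.SetLogOutput(logFile)
+	jww.SetLogThreshold(jww.LevelInfo)
+
+	jww.INFO.Println("this line ends up in jww.log")
+}
+```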
+
+
+# More information
+
+This is an early release. I’ve been using it for a while and this is the
+third interface I’ve tried. I like this one pretty well, but no guarantees
+that it won’t change a bit.
+
+I wrote this for use in [hugo](https://gohugo.io). If you are looking
+for a static website engine that’s super fast, please check out Hugo.
diff --git a/vendor/github.com/spf13/jwalterweatherman/default_notepad.go b/vendor/github.com/spf13/jwalterweatherman/default_notepad.go
new file mode 100644
index 000000000..a018c15c4
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/default_notepad.go
@@ -0,0 +1,111 @@
+// Copyright © 2016 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package jwalterweatherman
+
+import (
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+)
+
+var (
+ TRACE *log.Logger
+ DEBUG *log.Logger
+ INFO *log.Logger
+ WARN *log.Logger
+ ERROR *log.Logger
+ CRITICAL *log.Logger
+ FATAL *log.Logger
+
+ LOG *log.Logger
+ FEEDBACK *Feedback
+
+ defaultNotepad *Notepad
+)
+
+func reloadDefaultNotepad() {
+ TRACE = defaultNotepad.TRACE
+ DEBUG = defaultNotepad.DEBUG
+ INFO = defaultNotepad.INFO
+ WARN = defaultNotepad.WARN
+ ERROR = defaultNotepad.ERROR
+ CRITICAL = defaultNotepad.CRITICAL
+ FATAL = defaultNotepad.FATAL
+
+ LOG = defaultNotepad.LOG
+ FEEDBACK = defaultNotepad.FEEDBACK
+}
+
+func init() {
+ defaultNotepad = NewNotepad(LevelError, LevelWarn, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime)
+ reloadDefaultNotepad()
+}
+
+// SetLogThreshold sets the log threshold for the default notepad. Warn by default.
+func SetLogThreshold(threshold Threshold) {
+ defaultNotepad.SetLogThreshold(threshold)
+ reloadDefaultNotepad()
+}
+
+// SetLogOutput sets the log output for the default notepad. Discarded by default.
+func SetLogOutput(handle io.Writer) {
+ defaultNotepad.SetLogOutput(handle)
+ reloadDefaultNotepad()
+}
+
+// SetStdoutThreshold sets the standard output threshold for the default notepad.
+// Error by default.
+func SetStdoutThreshold(threshold Threshold) {
+ defaultNotepad.SetStdoutThreshold(threshold)
+ reloadDefaultNotepad()
+}
+
+// SetStdoutOutput sets the stdout output for the default notepad. Default is stdout.
+func SetStdoutOutput(handle io.Writer) {
+ defaultNotepad.outHandle = handle
+ defaultNotepad.init()
+ reloadDefaultNotepad()
+}
+
+// SetPrefix sets the prefix for the default logger. Empty by default.
+func SetPrefix(prefix string) {
+ defaultNotepad.SetPrefix(prefix)
+ reloadDefaultNotepad()
+}
+
+// SetFlags sets the flags for the default logger. "log.Ldate | log.Ltime" by default.
+func SetFlags(flags int) {
+ defaultNotepad.SetFlags(flags)
+ reloadDefaultNotepad()
+}
+
+// SetLogListeners configures the default logger with one or more log listeners.
+func SetLogListeners(l ...LogListener) {
+ defaultNotepad.logListeners = l
+ defaultNotepad.init()
+ reloadDefaultNotepad()
+}
+
+// LogThreshold returns the current global log threshold.
+func LogThreshold() Threshold {
+ return defaultNotepad.logThreshold
+}
+
+// StdoutThreshold returns the current global stdout threshold.
+func StdoutThreshold() Threshold {
+ return defaultNotepad.stdoutThreshold
+}
+
+// GetLogThreshold returns the defined Threshold for the log logger.
+func GetLogThreshold() Threshold {
+ return defaultNotepad.GetLogThreshold()
+}
+
+// GetStdoutThreshold returns the Threshold for the stdout logger.
+func GetStdoutThreshold() Threshold {
+ return defaultNotepad.GetStdoutThreshold()
+}
diff --git a/vendor/github.com/spf13/jwalterweatherman/log_counter.go b/vendor/github.com/spf13/jwalterweatherman/log_counter.go
new file mode 100644
index 000000000..41285f3dc
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/log_counter.go
@@ -0,0 +1,46 @@
+// Copyright © 2016 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package jwalterweatherman
+
+import (
+ "io"
+ "sync/atomic"
+)
+
+// Counter is an io.Writer that increments a counter on Write.
+type Counter struct {
+ count uint64
+}
+
+func (c *Counter) incr() {
+ atomic.AddUint64(&c.count, 1)
+}
+
+// Reset resets the counter.
+func (c *Counter) Reset() {
+ atomic.StoreUint64(&c.count, 0)
+}
+
+// Count returns the current count.
+func (c *Counter) Count() uint64 {
+ return atomic.LoadUint64(&c.count)
+}
+
+func (c *Counter) Write(p []byte) (n int, err error) {
+ c.incr()
+ return len(p), nil
+}
+
+// LogCounter creates a LogListener that counts log statements >= the given threshold.
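+//
+// Illustrative usage (a sketch, not part of the upstream source): count the
+// ERROR-or-worse statements written by a Notepad, e.g. in a test:
+//
+//	counter := &Counter{}
+//	n := NewNotepad(LevelFatal, LevelFatal, ioutil.Discard, ioutil.Discard, "", 0, LogCounter(counter, LevelError))
+//	n.ERROR.Println("something broke")
+//	// counter.Count() == 1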
+func LogCounter(counter *Counter, t1 Threshold) LogListener {
+ return func(t2 Threshold) io.Writer {
+ if t2 < t1 {
+ // Not interested in this threshold.
+ return nil
+ }
+ return counter
+ }
+}
diff --git a/vendor/github.com/spf13/jwalterweatherman/notepad.go b/vendor/github.com/spf13/jwalterweatherman/notepad.go
new file mode 100644
index 000000000..cc7957bf7
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/notepad.go
@@ -0,0 +1,225 @@
+// Copyright © 2016 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package jwalterweatherman
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+)
+
+type Threshold int
+
+func (t Threshold) String() string {
+ return prefixes[t]
+}
+
+const (
+ LevelTrace Threshold = iota
+ LevelDebug
+ LevelInfo
+ LevelWarn
+ LevelError
+ LevelCritical
+ LevelFatal
+)
+
+var prefixes map[Threshold]string = map[Threshold]string{
+ LevelTrace: "TRACE",
+ LevelDebug: "DEBUG",
+ LevelInfo: "INFO",
+ LevelWarn: "WARN",
+ LevelError: "ERROR",
+ LevelCritical: "CRITICAL",
+ LevelFatal: "FATAL",
+}
+
+// Notepad is where you leave a note!
+type Notepad struct {
+ TRACE *log.Logger
+ DEBUG *log.Logger
+ INFO *log.Logger
+ WARN *log.Logger
+ ERROR *log.Logger
+ CRITICAL *log.Logger
+ FATAL *log.Logger
+
+ LOG *log.Logger
+ FEEDBACK *Feedback
+
+ loggers [7]**log.Logger
+ logHandle io.Writer
+ outHandle io.Writer
+ logThreshold Threshold
+ stdoutThreshold Threshold
+ prefix string
+ flags int
+
+ logListeners []LogListener
+}
+
+// A LogListener can be supplied to a Notepad to listen on log writes for a given
+// threshold. This can be used to capture log events in unit tests and similar.
+// Note that this function will be invoked once for each log threshold. If
+// the given threshold is not of interest to you, return nil.
+// Note that these listeners will receive log events for a given threshold, even
+// if the current configuration says not to log it. That way you can count ERRORs even
+// if you don't print them to the console.
+type LogListener func(t Threshold) io.Writer
+
+// NewNotepad creates a new Notepad.
+func NewNotepad(
+ outThreshold Threshold,
+ logThreshold Threshold,
+ outHandle, logHandle io.Writer,
+ prefix string, flags int,
+ logListeners ...LogListener,
+) *Notepad {
+
+ n := &Notepad{logListeners: logListeners}
+
+ n.loggers = [7]**log.Logger{&n.TRACE, &n.DEBUG, &n.INFO, &n.WARN, &n.ERROR, &n.CRITICAL, &n.FATAL}
+ n.outHandle = outHandle
+ n.logHandle = logHandle
+ n.stdoutThreshold = outThreshold
+ n.logThreshold = logThreshold
+
+ if len(prefix) != 0 {
+ n.prefix = "[" + prefix + "] "
+ } else {
+ n.prefix = ""
+ }
+
+ n.flags = flags
+
+ n.LOG = log.New(n.logHandle,
+ "LOG: ",
+ n.flags)
+ n.FEEDBACK = &Feedback{out: log.New(outHandle, "", 0), log: n.LOG}
+
+ n.init()
+ return n
+}
+
+// init creates the loggers for each level depending on the notepad thresholds.
+func (n *Notepad) init() {
+ logAndOut := io.MultiWriter(n.outHandle, n.logHandle)
+
+ for t, logger := range n.loggers {
+ threshold := Threshold(t)
+ prefix := n.prefix + threshold.String() + " "
+
+ switch {
+ case threshold >= n.logThreshold && threshold >= n.stdoutThreshold:
+ *logger = log.New(n.createLogWriters(threshold, logAndOut), prefix, n.flags)
+
+ case threshold >= n.logThreshold:
+ *logger = log.New(n.createLogWriters(threshold, n.logHandle), prefix, n.flags)
+
+ case threshold >= n.stdoutThreshold:
+ *logger = log.New(n.createLogWriters(threshold, n.outHandle), prefix, n.flags)
+
+ default:
+ *logger = log.New(n.createLogWriters(threshold, ioutil.Discard), prefix, n.flags)
+ }
+ }
+}
+
+func (n *Notepad) createLogWriters(t Threshold, handle io.Writer) io.Writer {
+ if len(n.logListeners) == 0 {
+ return handle
+ }
+ writers := []io.Writer{handle}
+ for _, l := range n.logListeners {
+ w := l(t)
+ if w != nil {
+ writers = append(writers, w)
+ }
+ }
+
+ if len(writers) == 1 {
+ return handle
+ }
+
+ return io.MultiWriter(writers...)
+}
+
+// SetLogThreshold changes the threshold at or above which messages are written to the
+// log file.
+func (n *Notepad) SetLogThreshold(threshold Threshold) {
+ n.logThreshold = threshold
+ n.init()
+}
+
+// SetLogOutput changes the file where log messages are written.
+func (n *Notepad) SetLogOutput(handle io.Writer) {
+ n.logHandle = handle
+ n.init()
+}
+
+// GetLogThreshold returns the defined Threshold for the log logger.
+func (n *Notepad) GetLogThreshold() Threshold {
+ return n.logThreshold
+}
+
+// SetStdoutThreshold changes the threshold at or above which messages are written to the
+// standard output.
+func (n *Notepad) SetStdoutThreshold(threshold Threshold) {
+ n.stdoutThreshold = threshold
+ n.init()
+}
+
+// GetStdoutThreshold returns the Threshold for the stdout logger.
+func (n *Notepad) GetStdoutThreshold() Threshold {
+ return n.stdoutThreshold
+}
+
+// SetPrefix changes the prefix used by the notepad. Prefixes are displayed between
+// brackets at the beginning of the line. An empty prefix won't be displayed at all.
+func (n *Notepad) SetPrefix(prefix string) {
+ if len(prefix) != 0 {
+ n.prefix = "[" + prefix + "] "
+ } else {
+ n.prefix = ""
+ }
+ n.init()
+}
+
+// SetFlags chooses which flags the logger will display (after prefix and message
+// level). See the standard log package for more information on this.
+func (n *Notepad) SetFlags(flags int) {
+ n.flags = flags
+ n.init()
+}
+
+// Feedback writes plainly to the outHandle while
+// logging with the standard extra information (date, file, etc).
+type Feedback struct {
+ out *log.Logger
+ log *log.Logger
+}
+
+func (fb *Feedback) Println(v ...interface{}) {
+ fb.output(fmt.Sprintln(v...))
+}
+
+func (fb *Feedback) Printf(format string, v ...interface{}) {
+ fb.output(fmt.Sprintf(format, v...))
+}
+
+func (fb *Feedback) Print(v ...interface{}) {
+ fb.output(fmt.Sprint(v...))
+}
+
+func (fb *Feedback) output(s string) {
+ if fb.out != nil {
+ fb.out.Output(2, s)
+ }
+ if fb.log != nil {
+ fb.log.Output(2, s)
+ }
+}
diff --git a/vendor/github.com/spf13/pflag/.gitignore b/vendor/github.com/spf13/pflag/.gitignore
new file mode 100644
index 000000000..c3da29013
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/.gitignore
@@ -0,0 +1,2 @@
+.idea/*
+
diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml
new file mode 100644
index 000000000..00d04cb9b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/.travis.yml
@@ -0,0 +1,22 @@
+sudo: false
+
+language: go
+
+go:
+ - 1.9.x
+ - 1.10.x
+ - 1.11.x
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
+
+install:
+ - go get golang.org/x/lint/golint
+ - export PATH=$GOPATH/bin:$PATH
+ - go install ./...
+
+script:
+ - verify/all.sh -v
+ - go test ./...
diff --git a/vendor/github.com/spf13/pflag/LICENSE b/vendor/github.com/spf13/pflag/LICENSE
new file mode 100644
index 000000000..63ed1cfea
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 Alex Ogier. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md
new file mode 100644
index 000000000..7eacc5bdb
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/README.md
@@ -0,0 +1,296 @@
+[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/pflag)](https://goreportcard.com/report/github.com/spf13/pflag)
+[![GoDoc](https://godoc.org/github.com/spf13/pflag?status.svg)](https://godoc.org/github.com/spf13/pflag)
+
+## Description
+
+pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the [GNU extensions to the POSIX recommendations
+for command-line options][1]. For a more precise description, see the
+"Command-line flag syntax" section below.
+
+[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+pflag is available under the same style of BSD license as the Go language,
+which can be found in the LICENSE file.
+
+## Installation
+
+pflag is available using the standard `go get` command.
+
+Install by running:
+
+ go get github.com/spf13/pflag
+
+Run tests by running:
+
+ go test github.com/spf13/pflag
+
+## Usage
+
+pflag is a drop-in replacement for Go's native flag package. If you import
+pflag under the name "flag" then all code should continue to function
+with no changes.
+
+``` go
+import flag "github.com/spf13/pflag"
+```
+
+There is one exception to this: if you directly instantiate the Flag struct
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+
+``` go
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+```
+
+If you like, you can bind the flag to a variable using the Var() functions.
+
+``` go
+var flagvar int
+func init() {
+ flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+}
+```
+
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+
+``` go
+flag.Var(&flagVal, "name", "help message for flagname")
+```
+
+For such flags, the default value is just the initial value of the variable.
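+
+For illustration, a minimal sketch of such a custom Value (the `csvValue` type and the `--tags` flag are invented for this example and are not part of pflag):
+
+``` go
+package main
+
+import (
+	"fmt"
+	"strings"
+
+	flag "github.com/spf13/pflag"
+)
+
+// csvValue collects a comma-separated argument into a []string. pflag's
+// Value interface needs String, Set and Type (Type is pflag-specific).
+type csvValue []string
+
+func (c *csvValue) String() string     { return strings.Join(*c, ",") }
+func (c *csvValue) Set(s string) error { *c = strings.Split(s, ","); return nil }
+func (c *csvValue) Type() string       { return "csv" }
+
+func main() {
+	var tags csvValue
+	flag.Var(&tags, "tags", "comma-separated tags")
+	flag.Parse()
+	fmt.Println(tags.String())
+}
+```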
+
+After all flags are defined, call
+
+``` go
+flag.Parse()
+```
+
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+
+``` go
+fmt.Println("ip has value ", *ip)
+fmt.Println("flagvar has value ", flagvar)
+```
+
+There are helper functions available to get the value stored in a Flag if you have a FlagSet but find
+it difficult to keep up with all of the pointers in your code.
+If you have a pflag.FlagSet with a flag called 'flagname' of type int you
+can use GetInt() to get the int value. But notice that 'flagname' must exist
+and it must be an int. GetString("flagname") will fail.
+
+``` go
+i, err := flagset.GetInt("flagname")
+```
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+var flagvar bool
+func init() {
+ flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+}
+flag.VarP(&flagVal, "varname", "v", "help message")
+```
+
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+
+## Setting no option default values for flags
+
+After you create a flag it is possible to set the pflag.NoOptDefVal for
+the given flag. Doing this changes the meaning of the flag slightly. If
+a flag has a NoOptDefVal and the flag is set on the command line without
+an option the flag will be set to the NoOptDefVal. For example given:
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+flag.Lookup("flagname").NoOptDefVal = "4321"
+```
+
+Would result in something like
+
+| Parsed Arguments | Resulting Value |
+| ------------- | ------------- |
+| --flagname=1357 | ip=1357 |
+| --flagname | ip=4321 |
+| [nothing] | ip=1234 |
+
+## Command line flag syntax
+
+```
+--flag // boolean flags, or flags with no option default values
+--flag x // only on flags without a default value
+--flag=x
+```
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags
+or flags with a 'no option default value'.
+
+```
+// boolean or flags where the 'no option default value' is set
+-f
+-f=true
+-abc
+but
+-b true is INVALID
+
+// non-boolean and flags without a 'no option default value'
+-n 1234
+-n=1234
+-n1234
+
+// mixed
+-abcs "hello"
+-absd="hello"
+-abcs1234
+```
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
+
+## Mutating or "Normalizing" Flag names
+
+It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow.
+
+**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag
+
+``` go
+func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ from := []string{"-", "_"}
+ to := "."
+ for _, sep := range from {
+ name = strings.Replace(name, sep, to, -1)
+ }
+ return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc)
+```
+
+**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name
+
+``` go
+func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ switch name {
+ case "old-flag-name":
+ name = "new-flag-name"
+ break
+ }
+ return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(aliasNormalizeFunc)
+```
+
+## Deprecating a flag or its shorthand
+It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used.
+
+**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead.
+```go
+// deprecate a flag by specifying its name and a usage message
+flags.MarkDeprecated("badflag", "please use --good-flag instead")
+```
+This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used.
+
+**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n".
+```go
+// deprecate a flag shorthand by specifying its flag name and a usage message
+flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only")
+```
+This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used.
+
+Note that the usage message is essential here, and it should not be empty.
+
+## Hidden flags
+It is possible to mark a flag as hidden, meaning it will still function as normal; however, it will not show up in usage/help text.
+
+**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available.
+```go
+// hide a flag by specifying its name
+flags.MarkHidden("secretFlag")
+```
+
+## Disable sorting of flags
+`pflag` allows you to disable sorting of flags for help and usage message.
+
+**Example**:
+```go
+flags.BoolP("verbose", "v", false, "verbose output")
+flags.String("coolflag", "yeaah", "it's really cool flag")
+flags.Int("usefulflag", 777, "sometimes it's very useful")
+flags.SortFlags = false
+flags.PrintDefaults()
+```
+**Output**:
+```
+ -v, --verbose verbose output
+ --coolflag string it's really cool flag (default "yeaah")
+ --usefulflag int sometimes it's very useful (default 777)
+```
+
+
+## Supporting Go flags when using pflag
+In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary
+to support flags defined by third-party dependencies (e.g. `golang/glog`).
+
+**Example**: You want to add the Go flags to the `CommandLine` flagset
+```go
+import (
+ goflag "flag"
+ flag "github.com/spf13/pflag"
+)
+
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+
+func main() {
+ flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
+ flag.Parse()
+}
+```
+
+## More info
+
+You can see the full reference documentation of the pflag package
+[at godoc.org][3], or through Go's standard documentation system by
+running `godoc -http=:6060` and browsing to
+[http://localhost:6060/pkg/github.com/spf13/pflag][2] after
+installation.
+
+[2]: http://localhost:6060/pkg/github.com/spf13/pflag
+[3]: http://godoc.org/github.com/spf13/pflag
diff --git a/vendor/github.com/spf13/pflag/bool.go b/vendor/github.com/spf13/pflag/bool.go
new file mode 100644
index 000000000..c4c5c0bfd
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool.go
@@ -0,0 +1,94 @@
+package pflag
+
+import "strconv"
+
+// optional interface to indicate boolean flags that can be
+// supplied without "=value" text
+type boolFlag interface {
+ Value
+ IsBoolFlag() bool
+}
+
+// -- bool Value
+type boolValue bool
+
+func newBoolValue(val bool, p *bool) *boolValue {
+ *p = val
+ return (*boolValue)(p)
+}
+
+func (b *boolValue) Set(s string) error {
+ v, err := strconv.ParseBool(s)
+ *b = boolValue(v)
+ return err
+}
+
+func (b *boolValue) Type() string {
+ return "bool"
+}
+
+func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) }
+
+func (b *boolValue) IsBoolFlag() bool { return true }
+
+func boolConv(sval string) (interface{}, error) {
+ return strconv.ParseBool(sval)
+}
+
+// GetBool returns the bool value of a flag with the given name.
+func (f *FlagSet) GetBool(name string) (bool, error) {
+ val, err := f.getFlagType(name, "bool", boolConv)
+ if err != nil {
+ return false, err
+ }
+ return val.(bool), nil
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
+ f.BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+ flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage)
+ flag.NoOptDefVal = "true"
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func BoolVar(p *bool, name string, value bool, usage string) {
+ BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+ flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage)
+ flag.NoOptDefVal = "true"
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func (f *FlagSet) Bool(name string, value bool, usage string) *bool {
+ return f.BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool {
+ p := new(bool)
+ f.BoolVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func Bool(name string, value bool, usage string) *bool {
+ return BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func BoolP(name, shorthand string, value bool, usage string) *bool {
+ b := CommandLine.BoolP(name, shorthand, value, usage)
+ return b
+}
diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go
new file mode 100644
index 000000000..3731370d6
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool_slice.go
@@ -0,0 +1,185 @@
+package pflag
+
+import (
+ "io"
+ "strconv"
+ "strings"
+)
+
+// -- boolSlice Value
+type boolSliceValue struct {
+ value *[]bool
+ changed bool
+}
+
+func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue {
+ bsv := new(boolSliceValue)
+ bsv.value = p
+ *bsv.value = val
+ return bsv
+}
+
+// Set converts, and assigns, the comma-separated boolean argument string representation as the []bool value of this flag.
+// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended.
+func (s *boolSliceValue) Set(val string) error {
+
+ // remove all quote characters
+ rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+ // read flag arguments with CSV parser
+ boolStrSlice, err := readAsCSV(rmQuote.Replace(val))
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ // parse boolean values into slice
+ out := make([]bool, 0, len(boolStrSlice))
+ for _, boolStr := range boolStrSlice {
+ b, err := strconv.ParseBool(strings.TrimSpace(boolStr))
+ if err != nil {
+ return err
+ }
+ out = append(out, b)
+ }
+
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+
+ s.changed = true
+
+ return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *boolSliceValue) Type() string {
+ return "boolSlice"
+}
+
+// String defines a "native" format for this boolean slice flag value.
+func (s *boolSliceValue) String() string {
+
+ boolStrSlice := make([]string, len(*s.value))
+ for i, b := range *s.value {
+ boolStrSlice[i] = strconv.FormatBool(b)
+ }
+
+ out, _ := writeAsCSV(boolStrSlice)
+
+ return "[" + out + "]"
+}
+
+func (s *boolSliceValue) fromString(val string) (bool, error) {
+ return strconv.ParseBool(val)
+}
+
+func (s *boolSliceValue) toString(val bool) string {
+ return strconv.FormatBool(val)
+}
+
+func (s *boolSliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *boolSliceValue) Replace(val []string) error {
+ out := make([]bool, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *boolSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func boolSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []bool{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]bool, len(ss))
+ for i, t := range ss {
+ var err error
+ out[i], err = strconv.ParseBool(t)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetBoolSlice returns the []bool value of a flag with the given name.
+func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) {
+ val, err := f.getFlagType(name, "boolSlice", boolSliceConv)
+ if err != nil {
+ return []bool{}, err
+ }
+ return val.([]bool), nil
+}
+
+// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string.
+// The argument p points to a []bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
+ f.VarP(newBoolSliceValue(value, p), name, "", usage)
+}
+
+// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
+ f.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
+}
+
+// BoolSliceVar defines a []bool flag with specified name, default value, and usage string.
+// The argument p points to a []bool variable in which to store the value of the flag.
+func BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
+ CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage)
+}
+
+// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
+ CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
+}
+
+// BoolSlice defines a []bool flag with specified name, default value, and usage string.
+// The return value is the address of a []bool variable that stores the value of the flag.
+func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool {
+ p := []bool{}
+ f.BoolSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+ p := []bool{}
+ f.BoolSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// BoolSlice defines a []bool flag with specified name, default value, and usage string.
+// The return value is the address of a []bool variable that stores the value of the flag.
+func BoolSlice(name string, value []bool, usage string) *[]bool {
+ return CommandLine.BoolSliceP(name, "", value, usage)
+}
+
+// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
+func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+ return CommandLine.BoolSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/bytes.go b/vendor/github.com/spf13/pflag/bytes.go
new file mode 100644
index 000000000..67d530457
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bytes.go
@@ -0,0 +1,209 @@
+package pflag
+
+import (
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+ "strings"
+)
+
+// BytesHex adapts []byte for use as a flag. Value of flag is HEX encoded
+type bytesHexValue []byte
+
+// String implements pflag.Value.String.
+func (bytesHex bytesHexValue) String() string {
+ return fmt.Sprintf("%X", []byte(bytesHex))
+}
+
+// Set implements pflag.Value.Set.
+func (bytesHex *bytesHexValue) Set(value string) error {
+ bin, err := hex.DecodeString(strings.TrimSpace(value))
+
+ if err != nil {
+ return err
+ }
+
+ *bytesHex = bin
+
+ return nil
+}
+
+// Type implements pflag.Value.Type.
+func (*bytesHexValue) Type() string {
+ return "bytesHex"
+}
+
+func newBytesHexValue(val []byte, p *[]byte) *bytesHexValue {
+ *p = val
+ return (*bytesHexValue)(p)
+}
+
+func bytesHexConv(sval string) (interface{}, error) {
+
+ bin, err := hex.DecodeString(sval)
+
+ if err == nil {
+ return bin, nil
+ }
+
+ return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
+}
+
+// GetBytesHex returns the []byte value of a flag with the given name.
+func (f *FlagSet) GetBytesHex(name string) ([]byte, error) {
+ val, err := f.getFlagType(name, "bytesHex", bytesHexConv)
+
+ if err != nil {
+ return []byte{}, err
+ }
+
+ return val.([]byte), nil
+}
+
+// BytesHexVar defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func (f *FlagSet) BytesHexVar(p *[]byte, name string, value []byte, usage string) {
+ f.VarP(newBytesHexValue(value, p), name, "", usage)
+}
+
+// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ f.VarP(newBytesHexValue(value, p), name, shorthand, usage)
+}
+
+// BytesHexVar defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func BytesHexVar(p *[]byte, name string, value []byte, usage string) {
+ CommandLine.VarP(newBytesHexValue(value, p), name, "", usage)
+}
+
+// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash.
+func BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ CommandLine.VarP(newBytesHexValue(value, p), name, shorthand, usage)
+}
+
+// BytesHex defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func (f *FlagSet) BytesHex(name string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesHexVarP(p, name, "", value, usage)
+ return p
+}
+
+// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesHexVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// BytesHex defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func BytesHex(name string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesHexP(name, "", value, usage)
+}
+
+// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash.
+func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesHexP(name, shorthand, value, usage)
+}
+
+// BytesBase64 adapts []byte for use as a flag. Value of flag is Base64 encoded
+type bytesBase64Value []byte
+
+// String implements pflag.Value.String.
+func (bytesBase64 bytesBase64Value) String() string {
+ return base64.StdEncoding.EncodeToString([]byte(bytesBase64))
+}
+
+// Set implements pflag.Value.Set.
+func (bytesBase64 *bytesBase64Value) Set(value string) error {
+ bin, err := base64.StdEncoding.DecodeString(strings.TrimSpace(value))
+
+ if err != nil {
+ return err
+ }
+
+ *bytesBase64 = bin
+
+ return nil
+}
+
+// Type implements pflag.Value.Type.
+func (*bytesBase64Value) Type() string {
+ return "bytesBase64"
+}
+
+func newBytesBase64Value(val []byte, p *[]byte) *bytesBase64Value {
+ *p = val
+ return (*bytesBase64Value)(p)
+}
+
+func bytesBase64ValueConv(sval string) (interface{}, error) {
+
+ bin, err := base64.StdEncoding.DecodeString(sval)
+ if err == nil {
+ return bin, nil
+ }
+
+ return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
+}
+
+// GetBytesBase64 returns the []byte value of a flag with the given name.
+func (f *FlagSet) GetBytesBase64(name string) ([]byte, error) {
+ val, err := f.getFlagType(name, "bytesBase64", bytesBase64ValueConv)
+
+ if err != nil {
+ return []byte{}, err
+ }
+
+ return val.([]byte), nil
+}
+
+// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func (f *FlagSet) BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
+ f.VarP(newBytesBase64Value(value, p), name, "", usage)
+}
+
+// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ f.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
+ CommandLine.VarP(newBytesBase64Value(value, p), name, "", usage)
+}
+
+// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ CommandLine.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func (f *FlagSet) BytesBase64(name string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesBase64VarP(p, name, "", value, usage)
+ return p
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesBase64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func BytesBase64(name string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesBase64P(name, "", value, usage)
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesBase64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go
new file mode 100644
index 000000000..a0b2679f7
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/count.go
@@ -0,0 +1,96 @@
+package pflag
+
+import "strconv"
+
+// -- count Value
+type countValue int
+
+func newCountValue(val int, p *int) *countValue {
+ *p = val
+ return (*countValue)(p)
+}
+
+func (i *countValue) Set(s string) error {
+ // "+1" means that no specific value was passed, so increment
+ if s == "+1" {
+ *i = countValue(*i + 1)
+ return nil
+ }
+ v, err := strconv.ParseInt(s, 0, 0)
+ *i = countValue(v)
+ return err
+}
+
+func (i *countValue) Type() string {
+ return "count"
+}
+
+func (i *countValue) String() string { return strconv.Itoa(int(*i)) }
+
+func countConv(sval string) (interface{}, error) {
+ i, err := strconv.Atoi(sval)
+ if err != nil {
+ return nil, err
+ }
+ return i, nil
+}
+
+// GetCount returns the int value of a flag with the given name.
+func (f *FlagSet) GetCount(name string) (int, error) {
+ val, err := f.getFlagType(name, "count", countConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int), nil
+}
+
+// CountVar defines a count flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
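+//
+// For example (an illustrative note, not upstream documentation): with a count
+// flag registered as "verbose"/"v", parsing the arguments "-v -v -v" leaves the
+// stored int at 3.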
+func (f *FlagSet) CountVar(p *int, name string, usage string) {
+ f.CountVarP(p, name, "", usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) {
+ flag := f.VarPF(newCountValue(0, p), name, shorthand, usage)
+ flag.NoOptDefVal = "+1"
+}
+
+// CountVar is like FlagSet.CountVar, but the flag is placed on the CommandLine instead of a given flag set.
+func CountVar(p *int, name string, usage string) {
+ CommandLine.CountVar(p, name, usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func CountVarP(p *int, name, shorthand string, usage string) {
+ CommandLine.CountVarP(p, name, shorthand, usage)
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func (f *FlagSet) Count(name string, usage string) *int {
+ p := new(int)
+ f.CountVarP(p, name, "", usage)
+ return p
+}
+
+// CountP is like Count, but takes a shorthand for the flag name.
+func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
+ p := new(int)
+ f.CountVarP(p, name, shorthand, usage)
+ return p
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func Count(name string, usage string) *int {
+ return CommandLine.CountP(name, "", usage)
+}
+
+// CountP is like Count, but takes a shorthand for the flag name.
+func CountP(name, shorthand string, usage string) *int {
+ return CommandLine.CountP(name, shorthand, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/duration.go b/vendor/github.com/spf13/pflag/duration.go
new file mode 100644
index 000000000..e9debef88
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/duration.go
@@ -0,0 +1,86 @@
+package pflag
+
+import (
+ "time"
+)
+
+// -- time.Duration Value
+type durationValue time.Duration
+
+func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
+ *p = val
+ return (*durationValue)(p)
+}
+
+func (d *durationValue) Set(s string) error {
+ v, err := time.ParseDuration(s)
+ *d = durationValue(v)
+ return err
+}
+
+func (d *durationValue) Type() string {
+ return "duration"
+}
+
+func (d *durationValue) String() string { return (*time.Duration)(d).String() }
+
+func durationConv(sval string) (interface{}, error) {
+ return time.ParseDuration(sval)
+}
+
+// GetDuration returns the duration value of a flag with the given name.
+func (f *FlagSet) GetDuration(name string) (time.Duration, error) {
+ val, err := f.getFlagType(name, "duration", durationConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(time.Duration), nil
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+ f.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+ f.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+ CommandLine.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+ CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration {
+ p := new(time.Duration)
+ f.DurationVarP(p, name, "", value, usage)
+ return p
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+ p := new(time.Duration)
+ f.DurationVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func Duration(name string, value time.Duration, usage string) *time.Duration {
+ return CommandLine.DurationP(name, "", value, usage)
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+ return CommandLine.DurationP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/duration_slice.go b/vendor/github.com/spf13/pflag/duration_slice.go
new file mode 100644
index 000000000..badadda53
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/duration_slice.go
@@ -0,0 +1,166 @@
+package pflag
+
+import (
+ "fmt"
+ "strings"
+ "time"
+)
+
+// -- durationSlice Value
+type durationSliceValue struct {
+ value *[]time.Duration
+ changed bool
+}
+
+func newDurationSliceValue(val []time.Duration, p *[]time.Duration) *durationSliceValue {
+ dsv := new(durationSliceValue)
+ dsv.value = p
+ *dsv.value = val
+ return dsv
+}
+
+func (s *durationSliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]time.Duration, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = time.ParseDuration(d)
+ if err != nil {
+ return err
+ }
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *durationSliceValue) Type() string {
+ return "durationSlice"
+}
+
+func (s *durationSliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%s", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *durationSliceValue) fromString(val string) (time.Duration, error) {
+ return time.ParseDuration(val)
+}
+
+func (s *durationSliceValue) toString(val time.Duration) string {
+ return fmt.Sprintf("%s", val)
+}
+
+func (s *durationSliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *durationSliceValue) Replace(val []string) error {
+ out := make([]time.Duration, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *durationSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func durationSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []time.Duration{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]time.Duration, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = time.ParseDuration(d)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ return out, nil
+}
+
+// GetDurationSlice returns the []time.Duration value of a flag with the given name
+func (f *FlagSet) GetDurationSlice(name string) ([]time.Duration, error) {
+ val, err := f.getFlagType(name, "durationSlice", durationSliceConv)
+ if err != nil {
+ return []time.Duration{}, err
+ }
+ return val.([]time.Duration), nil
+}
+
+// DurationSliceVar defines a durationSlice flag with specified name, default value, and usage string.
+// The argument p points to a []time.Duration variable in which to store the value of the flag.
+func (f *FlagSet) DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) {
+ f.VarP(newDurationSliceValue(value, p), name, "", usage)
+}
+
+// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) {
+ f.VarP(newDurationSliceValue(value, p), name, shorthand, usage)
+}
+
+// DurationSliceVar defines a duration[] flag with specified name, default value, and usage string.
+// The argument p points to a duration[] variable in which to store the value of the flag.
+func DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) {
+ CommandLine.VarP(newDurationSliceValue(value, p), name, "", usage)
+}
+
+// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) {
+ CommandLine.VarP(newDurationSliceValue(value, p), name, shorthand, usage)
+}
+
+// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a []time.Duration variable that stores the value of the flag.
+func (f *FlagSet) DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration {
+ p := []time.Duration{}
+ f.DurationSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration {
+ p := []time.Duration{}
+ f.DurationSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a []time.Duration variable that stores the value of the flag.
+func DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration {
+ return CommandLine.DurationSliceP(name, "", value, usage)
+}
+
+// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash.
+func DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration {
+ return CommandLine.DurationSliceP(name, shorthand, value, usage)
+}
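The slice variant parses comma-separated values; as the Set method above shows, the first use of the flag replaces the default while later uses append. A small sketch on a standalone FlagSet:

```go
package main

import (
	"fmt"
	"time"

	pflag "github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("retry", pflag.ContinueOnError)
	delays := fs.DurationSlice("delay", []time.Duration{time.Second}, "retry delays")

	// The first occurrence replaces the default, the second appends to it.
	if err := fs.Parse([]string{"--delay=1s,5s", "--delay=30s"}); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(*delays) // [1s 5s 30s]
}
```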
diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go
new file mode 100644
index 000000000..24a5036e9
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/flag.go
@@ -0,0 +1,1239 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the GNU extensions to the POSIX recommendations
+for command-line options. See
+http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+Usage:
+
+pflag is a drop-in replacement of Go's native flag package. If you import
+pflag under the name "flag" then all code should continue to function
+with no changes.
+
+ import flag "github.com/spf13/pflag"
+
+There is one exception to this: if you directly instantiate the Flag struct
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+ var ip = flag.Int("flagname", 1234, "help message for flagname")
+If you like, you can bind the flag to a variable using the Var() functions.
+ var flagvar int
+ func init() {
+ flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+ }
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+ flag.Var(&flagVal, "name", "help message for flagname")
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+ flag.Parse()
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+ fmt.Println("ip has value ", *ip)
+ fmt.Println("flagvar has value ", flagvar)
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+ var ip = flag.IntP("flagname", "f", 1234, "help message")
+ var flagvar bool
+ func init() {
+ flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+ }
+ flag.VarP(&flagval, "varname", "v", "help message")
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+Command line flag syntax:
+ --flag // boolean flags only
+ --flag=x
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags.
+ // boolean flags
+ -f
+ -abc
+ // non-boolean flags
+ -n 1234
+ -Ifile
+ // mixed
+ -abcs "hello"
+ -abcn1234
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
+
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+*/
+package pflag
+
+import (
+ "bytes"
+ "errors"
+ goflag "flag"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+)
+
+// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
+var ErrHelp = errors.New("pflag: help requested")
+
+// ErrorHandling defines how to handle flag parsing errors.
+type ErrorHandling int
+
+const (
+ // ContinueOnError will return an err from Parse() if an error is found
+ ContinueOnError ErrorHandling = iota
+ // ExitOnError will call os.Exit(2) if an error is found when parsing
+ ExitOnError
+ // PanicOnError will panic() if an error is found when parsing flags
+ PanicOnError
+)
+
+// ParseErrorsWhitelist defines the parsing errors that can be ignored
+type ParseErrorsWhitelist struct {
+ // UnknownFlags will ignore unknown flags errors and continue parsing rest of the flags
+ UnknownFlags bool
+}
+
+// NormalizedName is a flag name that has been normalized according to rules
+// for the FlagSet (e.g. making '-' and '_' equivalent).
+type NormalizedName string
+
+// A FlagSet represents a set of defined flags.
+type FlagSet struct {
+ // Usage is the function called when an error occurs while parsing flags.
+ // The field is a function (not a method) that may be changed to point to
+ // a custom error handler.
+ Usage func()
+
+ // SortFlags indicates whether flags should be sorted in
+ // help/usage messages.
+ SortFlags bool
+
+ // ParseErrorsWhitelist is used to configure a whitelist of errors
+ ParseErrorsWhitelist ParseErrorsWhitelist
+
+ name string
+ parsed bool
+ actual map[NormalizedName]*Flag
+ orderedActual []*Flag
+ sortedActual []*Flag
+ formal map[NormalizedName]*Flag
+ orderedFormal []*Flag
+ sortedFormal []*Flag
+ shorthands map[byte]*Flag
+ args []string // arguments after flags
+ argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no --
+ errorHandling ErrorHandling
+ output io.Writer // nil means stderr; use out() accessor
+ interspersed bool // allow interspersed option/non-option args
+ normalizeNameFunc func(f *FlagSet, name string) NormalizedName
+
+ addedGoFlagSets []*goflag.FlagSet
+}
+
+// A Flag represents the state of a flag.
+type Flag struct {
+ Name string // name as it appears on command line
+ Shorthand string // one-letter abbreviated flag
+ Usage string // help message
+ Value Value // value as set
+ DefValue string // default value (as text); for usage message
+ Changed bool // Changed records whether the user explicitly set the value (as opposed to leaving the default)
+ NoOptDefVal string // default value (as text); if the flag is on the command line without any options
+ Deprecated string // If this flag is deprecated, this string describes what to use instead
+ Hidden bool // used by cobra.Command to allow flags to be hidden from help/usage text
+ ShorthandDeprecated string // If the shorthand of this flag is deprecated, this string describes what to use instead
+ Annotations map[string][]string // used by cobra.Command bash autocomplete code
+}
+
+// Value is the interface to the dynamic value stored in a flag.
+// (The default value is represented as a string.)
+type Value interface {
+ String() string
+ Set(string) error
+ Type() string
+}
+
+// SliceValue is a secondary interface to all flags which hold a list
+// of values. This allows full control over the value of list flags,
+// and avoids complicated marshalling and unmarshalling to csv.
+type SliceValue interface {
+ // Append adds the specified value to the end of the flag value list.
+ Append(string) error
+ // Replace will fully overwrite any data currently in the flag value list.
+ Replace([]string) error
+ // GetSlice returns the flag value list as an array of strings.
+ GetSlice() []string
+}
+
+// sortFlags returns the flags as a slice in lexicographical sorted order.
+func sortFlags(flags map[NormalizedName]*Flag) []*Flag {
+ list := make(sort.StringSlice, len(flags))
+ i := 0
+ for k := range flags {
+ list[i] = string(k)
+ i++
+ }
+ list.Sort()
+ result := make([]*Flag, len(list))
+ for i, name := range list {
+ result[i] = flags[NormalizedName(name)]
+ }
+ return result
+}
+
+// SetNormalizeFunc allows you to add a function which can translate flag names.
+// Flags added to the FlagSet will be translated, and any later lookups of a flag
+// will be translated through the same function. So it would be possible to create
+// a flag named "getURL" and have it translated to "geturl". A user could then pass
+// "--getUrl", which would also be translated to "geturl", and everything will work.
+func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) {
+ f.normalizeNameFunc = n
+ f.sortedFormal = f.sortedFormal[:0]
+ for fname, flag := range f.formal {
+ nname := f.normalizeFlagName(flag.Name)
+ if fname == nname {
+ continue
+ }
+ flag.Name = string(nname)
+ delete(f.formal, fname)
+ f.formal[nname] = flag
+ if _, set := f.actual[fname]; set {
+ delete(f.actual, fname)
+ f.actual[nname] = flag
+ }
+ }
+}
+
+// GetNormalizeFunc returns the previously set NormalizeFunc, or a function which
+// does no translation if none has been set.
+func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName {
+ if f.normalizeNameFunc != nil {
+ return f.normalizeNameFunc
+ }
+ return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) }
+}
+
+func (f *FlagSet) normalizeFlagName(name string) NormalizedName {
+ n := f.GetNormalizeFunc()
+ return n(f, name)
+}
+
+func (f *FlagSet) out() io.Writer {
+ if f.output == nil {
+ return os.Stderr
+ }
+ return f.output
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+func (f *FlagSet) SetOutput(output io.Writer) {
+ f.output = output
+}
+
+// VisitAll visits the flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits all flags, even those not set.
+func (f *FlagSet) VisitAll(fn func(*Flag)) {
+ if len(f.formal) == 0 {
+ return
+ }
+
+ var flags []*Flag
+ if f.SortFlags {
+ if len(f.formal) != len(f.sortedFormal) {
+ f.sortedFormal = sortFlags(f.formal)
+ }
+ flags = f.sortedFormal
+ } else {
+ flags = f.orderedFormal
+ }
+
+ for _, flag := range flags {
+ fn(flag)
+ }
+}
+
+// HasFlags returns a bool to indicate if the FlagSet has any flags defined.
+func (f *FlagSet) HasFlags() bool {
+ return len(f.formal) > 0
+}
+
+// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags
+// that are not hidden.
+func (f *FlagSet) HasAvailableFlags() bool {
+ for _, flag := range f.formal {
+ if !flag.Hidden {
+ return true
+ }
+ }
+ return false
+}
+
+// VisitAll visits the command-line flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits all flags, even those not set.
+func VisitAll(fn func(*Flag)) {
+ CommandLine.VisitAll(fn)
+}
+
+// Visit visits the flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits only those flags that have been set.
+func (f *FlagSet) Visit(fn func(*Flag)) {
+ if len(f.actual) == 0 {
+ return
+ }
+
+ var flags []*Flag
+ if f.SortFlags {
+ if len(f.actual) != len(f.sortedActual) {
+ f.sortedActual = sortFlags(f.actual)
+ }
+ flags = f.sortedActual
+ } else {
+ flags = f.orderedActual
+ }
+
+ for _, flag := range flags {
+ fn(flag)
+ }
+}
+
+// Visit visits the command-line flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits only those flags that have been set.
+func Visit(fn func(*Flag)) {
+ CommandLine.Visit(fn)
+}
+
+// Lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) Lookup(name string) *Flag {
+ return f.lookup(f.normalizeFlagName(name))
+}
+
+// ShorthandLookup returns the Flag structure of the shorthand flag,
+// returning nil if none exists.
+// It panics if len(name) > 1.
+func (f *FlagSet) ShorthandLookup(name string) *Flag {
+ if name == "" {
+ return nil
+ }
+ if len(name) > 1 {
+ msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name)
+ fmt.Fprintf(f.out(), msg)
+ panic(msg)
+ }
+ c := name[0]
+ return f.shorthands[c]
+}
+
+// lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) lookup(name NormalizedName) *Flag {
+ return f.formal[name]
+}
+
+// func to return a given type for a given flag name
+func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) {
+ flag := f.Lookup(name)
+ if flag == nil {
+ err := fmt.Errorf("flag accessed but not defined: %s", name)
+ return nil, err
+ }
+
+ if flag.Value.Type() != ftype {
+ err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type())
+ return nil, err
+ }
+
+ sval := flag.Value.String()
+ result, err := convFunc(sval)
+ if err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+// ArgsLenAtDash will return the length of f.Args at the moment when a -- was
+// found during arg parsing. This allows your program to know which args were
+// before the -- and which came after.
+func (f *FlagSet) ArgsLenAtDash() int {
+ return f.argsLenAtDash
+}
+
+// MarkDeprecated indicates that a flag is deprecated in your program. It will
+// continue to function but will not show up in help or usage messages. Using
+// this flag will also print the given usageMessage.
+func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ if usageMessage == "" {
+ return fmt.Errorf("deprecated message for flag %q must be set", name)
+ }
+ flag.Deprecated = usageMessage
+ flag.Hidden = true
+ return nil
+}
+
+// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your
+// program. It will continue to function but will not show up in help or usage
+// messages. Using this flag will also print the given usageMessage.
+func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ if usageMessage == "" {
+ return fmt.Errorf("deprecated message for flag %q must be set", name)
+ }
+ flag.ShorthandDeprecated = usageMessage
+ return nil
+}
+
+// MarkHidden sets a flag to 'hidden' in your program. It will continue to
+// function but will not show up in help or usage messages.
+func (f *FlagSet) MarkHidden(name string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ flag.Hidden = true
+ return nil
+}
+
+// Lookup returns the Flag structure of the named command-line flag,
+// returning nil if none exists.
+func Lookup(name string) *Flag {
+ return CommandLine.Lookup(name)
+}
+
+// ShorthandLookup returns the Flag structure of the shorthand flag,
+// returning nil if none exists.
+func ShorthandLookup(name string) *Flag {
+ return CommandLine.ShorthandLookup(name)
+}
+
+// Set sets the value of the named flag.
+func (f *FlagSet) Set(name, value string) error {
+ normalName := f.normalizeFlagName(name)
+ flag, ok := f.formal[normalName]
+ if !ok {
+ return fmt.Errorf("no such flag -%v", name)
+ }
+
+ err := flag.Value.Set(value)
+ if err != nil {
+ var flagName string
+ if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
+ flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name)
+ } else {
+ flagName = fmt.Sprintf("--%s", flag.Name)
+ }
+ return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err)
+ }
+
+ if !flag.Changed {
+ if f.actual == nil {
+ f.actual = make(map[NormalizedName]*Flag)
+ }
+ f.actual[normalName] = flag
+ f.orderedActual = append(f.orderedActual, flag)
+
+ flag.Changed = true
+ }
+
+ if flag.Deprecated != "" {
+ fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
+ }
+ return nil
+}
+
+// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet.
+// This is sometimes used by spf13/cobra programs which want to generate additional
+// bash completion information.
+func (f *FlagSet) SetAnnotation(name, key string, values []string) error {
+ normalName := f.normalizeFlagName(name)
+ flag, ok := f.formal[normalName]
+ if !ok {
+ return fmt.Errorf("no such flag -%v", name)
+ }
+ if flag.Annotations == nil {
+ flag.Annotations = map[string][]string{}
+ }
+ flag.Annotations[key] = values
+ return nil
+}
+
+// Changed returns true if the flag was explicitly set during Parse() and false
+// otherwise
+func (f *FlagSet) Changed(name string) bool {
+ flag := f.Lookup(name)
+ // If a flag doesn't exist, it wasn't changed....
+ if flag == nil {
+ return false
+ }
+ return flag.Changed
+}
+
+// Set sets the value of the named command-line flag.
+func Set(name, value string) error {
+ return CommandLine.Set(name, value)
+}
+
+// PrintDefaults prints, to standard error unless configured
+// otherwise, the default values of all defined flags in the set.
+func (f *FlagSet) PrintDefaults() {
+ usages := f.FlagUsages()
+ fmt.Fprint(f.out(), usages)
+}
+
+// defaultIsZeroValue returns true if the default value for this flag represents
+// a zero value.
+func (f *Flag) defaultIsZeroValue() bool {
+ switch f.Value.(type) {
+ case boolFlag:
+ return f.DefValue == "false"
+ case *durationValue:
+ // Beginning in Go 1.7, duration zero values are "0s"
+ return f.DefValue == "0" || f.DefValue == "0s"
+ case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value:
+ return f.DefValue == "0"
+ case *stringValue:
+ return f.DefValue == ""
+ case *ipValue, *ipMaskValue, *ipNetValue:
+ return f.DefValue == ""
+ case *intSliceValue, *stringSliceValue, *stringArrayValue:
+ return f.DefValue == "[]"
+ default:
+ switch f.Value.String() {
+ case "false":
+ return true
+ case "":
+ return true
+ case "":
+ return true
+ case "0":
+ return true
+ }
+ return false
+ }
+}
+
+// UnquoteUsage extracts a back-quoted name from the usage
+// string for a flag and returns it and the un-quoted usage.
+// Given "a `name` to show" it returns ("name", "a name to show").
+// If there are no back quotes, the name is an educated guess of the
+// type of the flag's value, or the empty string if the flag is boolean.
+func UnquoteUsage(flag *Flag) (name string, usage string) {
+ // Look for a back-quoted name, but avoid the strings package.
+ usage = flag.Usage
+ for i := 0; i < len(usage); i++ {
+ if usage[i] == '`' {
+ for j := i + 1; j < len(usage); j++ {
+ if usage[j] == '`' {
+ name = usage[i+1 : j]
+ usage = usage[:i] + name + usage[j+1:]
+ return name, usage
+ }
+ }
+ break // Only one back quote; use type name.
+ }
+ }
+
+ name = flag.Value.Type()
+ switch name {
+ case "bool":
+ name = ""
+ case "float64":
+ name = "float"
+ case "int64":
+ name = "int"
+ case "uint64":
+ name = "uint"
+ case "stringSlice":
+ name = "strings"
+ case "intSlice":
+ name = "ints"
+ case "uintSlice":
+ name = "uints"
+ case "boolSlice":
+ name = "bools"
+ }
+
+ return
+}
+
+// Splits the string `s` on whitespace into an initial substring up to
+// `i` runes in length and the remainder. Will go `slop` over `i` if
+// that encompasses the entire string (which allows the caller to
+// avoid short orphan words on the final line).
+func wrapN(i, slop int, s string) (string, string) {
+ if i+slop > len(s) {
+ return s, ""
+ }
+
+ w := strings.LastIndexAny(s[:i], " \t\n")
+ if w <= 0 {
+ return s, ""
+ }
+ nlPos := strings.LastIndex(s[:i], "\n")
+ if nlPos > 0 && nlPos < w {
+ return s[:nlPos], s[nlPos+1:]
+ }
+ return s[:w], s[w+1:]
+}
+
+// Wraps the string `s` to a maximum width `w` with leading indent
+// `i`. The first line is not indented (this is assumed to be done by
+// caller). Pass `w` == 0 to do no wrapping
+func wrap(i, w int, s string) string {
+ if w == 0 {
+ return strings.Replace(s, "\n", "\n"+strings.Repeat(" ", i), -1)
+ }
+
+ // space between indent i and end of line width w into which
+ // we should wrap the text.
+ wrap := w - i
+
+ var r, l string
+
+ // Not enough space for sensible wrapping. Wrap as a block on
+ // the next line instead.
+ if wrap < 24 {
+ i = 16
+ wrap = w - i
+ r += "\n" + strings.Repeat(" ", i)
+ }
+ // If still not enough space then don't even try to wrap.
+ if wrap < 24 {
+ return strings.Replace(s, "\n", r, -1)
+ }
+
+ // Try to avoid short orphan words on the final line, by
+ // allowing wrapN to go a bit over if that would fit in the
+ // remainder of the line.
+ slop := 5
+ wrap = wrap - slop
+
+ // Handle first line, which is indented by the caller (or the
+ // special case above)
+ l, s = wrapN(wrap, slop, s)
+ r = r + strings.Replace(l, "\n", "\n"+strings.Repeat(" ", i), -1)
+
+ // Now wrap the rest
+ for s != "" {
+ var t string
+
+ t, s = wrapN(wrap, slop, s)
+ r = r + "\n" + strings.Repeat(" ", i) + strings.Replace(t, "\n", "\n"+strings.Repeat(" ", i), -1)
+ }
+
+ return r
+
+}
+
+// FlagUsagesWrapped returns a string containing the usage information
+// for all flags in the FlagSet. Wrapped to `cols` columns (0 for no
+// wrapping)
+func (f *FlagSet) FlagUsagesWrapped(cols int) string {
+ buf := new(bytes.Buffer)
+
+ lines := make([]string, 0, len(f.formal))
+
+ maxlen := 0
+ f.VisitAll(func(flag *Flag) {
+ if flag.Hidden {
+ return
+ }
+
+ line := ""
+ if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
+ line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name)
+ } else {
+ line = fmt.Sprintf(" --%s", flag.Name)
+ }
+
+ varname, usage := UnquoteUsage(flag)
+ if varname != "" {
+ line += " " + varname
+ }
+ if flag.NoOptDefVal != "" {
+ switch flag.Value.Type() {
+ case "string":
+ line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal)
+ case "bool":
+ if flag.NoOptDefVal != "true" {
+ line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+ }
+ case "count":
+ if flag.NoOptDefVal != "+1" {
+ line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+ }
+ default:
+ line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+ }
+ }
+
+ // This special character will be replaced with spacing once the
+ // correct alignment is calculated
+ line += "\x00"
+ if len(line) > maxlen {
+ maxlen = len(line)
+ }
+
+ line += usage
+ if !flag.defaultIsZeroValue() {
+ if flag.Value.Type() == "string" {
+ line += fmt.Sprintf(" (default %q)", flag.DefValue)
+ } else {
+ line += fmt.Sprintf(" (default %s)", flag.DefValue)
+ }
+ }
+ if len(flag.Deprecated) != 0 {
+ line += fmt.Sprintf(" (DEPRECATED: %s)", flag.Deprecated)
+ }
+
+ lines = append(lines, line)
+ })
+
+ for _, line := range lines {
+ sidx := strings.Index(line, "\x00")
+ spacing := strings.Repeat(" ", maxlen-sidx)
+ // maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx
+ fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:]))
+ }
+
+ return buf.String()
+}
+
+// FlagUsages returns a string containing the usage information for all flags in
+// the FlagSet
+func (f *FlagSet) FlagUsages() string {
+ return f.FlagUsagesWrapped(0)
+}
+
+// PrintDefaults prints to standard error the default values of all defined command-line flags.
+func PrintDefaults() {
+ CommandLine.PrintDefaults()
+}
+
+// defaultUsage is the default function to print a usage message.
+func defaultUsage(f *FlagSet) {
+ fmt.Fprintf(f.out(), "Usage of %s:\n", f.name)
+ f.PrintDefaults()
+}
+
+// NOTE: Usage is not just defaultUsage(CommandLine)
+// because it serves (via godoc flag Usage) as the example
+// for how to write your own usage function.
+
+// Usage prints to standard error a usage message documenting all defined command-line flags.
+// The function is a variable that may be changed to point to a custom function.
+// By default it prints a simple header and calls PrintDefaults; for details about the
+// format of the output and how to control it, see the documentation for PrintDefaults.
+var Usage = func() {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+ PrintDefaults()
+}
+
+// NFlag returns the number of flags that have been set.
+func (f *FlagSet) NFlag() int { return len(f.actual) }
+
+// NFlag returns the number of command-line flags that have been set.
+func NFlag() int { return len(CommandLine.actual) }
+
+// Arg returns the i'th argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func (f *FlagSet) Arg(i int) string {
+ if i < 0 || i >= len(f.args) {
+ return ""
+ }
+ return f.args[i]
+}
+
+// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func Arg(i int) string {
+ return CommandLine.Arg(i)
+}
+
+// NArg is the number of arguments remaining after flags have been processed.
+func (f *FlagSet) NArg() int { return len(f.args) }
+
+// NArg is the number of arguments remaining after flags have been processed.
+func NArg() int { return len(CommandLine.args) }
+
+// Args returns the non-flag arguments.
+func (f *FlagSet) Args() []string { return f.args }
+
+// Args returns the non-flag command-line arguments.
+func Args() []string { return CommandLine.args }
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func (f *FlagSet) Var(value Value, name string, usage string) {
+ f.VarP(value, name, "", usage)
+}
+
+// VarPF is like VarP, but returns the flag created
+func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag {
+ // Remember the default value as a string; it won't change.
+ flag := &Flag{
+ Name: name,
+ Shorthand: shorthand,
+ Usage: usage,
+ Value: value,
+ DefValue: value.String(),
+ }
+ f.AddFlag(flag)
+ return flag
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) VarP(value Value, name, shorthand, usage string) {
+ f.VarPF(value, name, shorthand, usage)
+}
+
+// AddFlag will add the flag to the FlagSet
+func (f *FlagSet) AddFlag(flag *Flag) {
+ normalizedFlagName := f.normalizeFlagName(flag.Name)
+
+ _, alreadyThere := f.formal[normalizedFlagName]
+ if alreadyThere {
+ msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name)
+ fmt.Fprintln(f.out(), msg)
+ panic(msg) // Happens only if flags are declared with identical names
+ }
+ if f.formal == nil {
+ f.formal = make(map[NormalizedName]*Flag)
+ }
+
+ flag.Name = string(normalizedFlagName)
+ f.formal[normalizedFlagName] = flag
+ f.orderedFormal = append(f.orderedFormal, flag)
+
+ if flag.Shorthand == "" {
+ return
+ }
+ if len(flag.Shorthand) > 1 {
+ msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand)
+ fmt.Fprintf(f.out(), msg)
+ panic(msg)
+ }
+ if f.shorthands == nil {
+ f.shorthands = make(map[byte]*Flag)
+ }
+ c := flag.Shorthand[0]
+ used, alreadyThere := f.shorthands[c]
+ if alreadyThere {
+ msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name)
+ fmt.Fprintf(f.out(), msg)
+ panic(msg)
+ }
+ f.shorthands[c] = flag
+}
+
+// AddFlagSet adds one FlagSet to another. If a flag is already present in f
+// the flag from newSet will be ignored.
+func (f *FlagSet) AddFlagSet(newSet *FlagSet) {
+ if newSet == nil {
+ return
+ }
+ newSet.VisitAll(func(flag *Flag) {
+ if f.Lookup(flag.Name) == nil {
+ f.AddFlag(flag)
+ }
+ })
+}
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func Var(value Value, name string, usage string) {
+ CommandLine.VarP(value, name, "", usage)
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func VarP(value Value, name, shorthand, usage string) {
+ CommandLine.VarP(value, name, shorthand, usage)
+}
+
+// failf prints to standard error a formatted error and usage message and
+// returns the error.
+func (f *FlagSet) failf(format string, a ...interface{}) error {
+ err := fmt.Errorf(format, a...)
+ if f.errorHandling != ContinueOnError {
+ fmt.Fprintln(f.out(), err)
+ f.usage()
+ }
+ return err
+}
+
+// usage calls the Usage method for the flag set, or the usage function if
+// the flag set is CommandLine.
+func (f *FlagSet) usage() {
+ if f == CommandLine {
+ Usage()
+ } else if f.Usage == nil {
+ defaultUsage(f)
+ } else {
+ f.Usage()
+ }
+}
+
+//--unknown (args will be empty)
+//--unknown --next-flag ... (args will be --next-flag ...)
+//--unknown arg ... (args will be arg ...)
+func stripUnknownFlagValue(args []string) []string {
+ if len(args) == 0 {
+ //--unknown
+ return args
+ }
+
+ first := args[0]
+ if len(first) > 0 && first[0] == '-' {
+ //--unknown --next-flag ...
+ return args
+ }
+
+ //--unknown arg ... (args will be arg ...)
+ if len(args) > 1 {
+ return args[1:]
+ }
+ return nil
+}
+
+func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) {
+ a = args
+ name := s[2:]
+ if len(name) == 0 || name[0] == '-' || name[0] == '=' {
+ err = f.failf("bad flag syntax: %s", s)
+ return
+ }
+
+ split := strings.SplitN(name, "=", 2)
+ name = split[0]
+ flag, exists := f.formal[f.normalizeFlagName(name)]
+
+ if !exists {
+ switch {
+ case name == "help":
+ f.usage()
+ return a, ErrHelp
+ case f.ParseErrorsWhitelist.UnknownFlags:
+ // --unknown=unknownval arg ...
+ // we do not want to lose arg in this case
+ if len(split) >= 2 {
+ return a, nil
+ }
+
+ return stripUnknownFlagValue(a), nil
+ default:
+ err = f.failf("unknown flag: --%s", name)
+ return
+ }
+ }
+
+ var value string
+ if len(split) == 2 {
+ // '--flag=arg'
+ value = split[1]
+ } else if flag.NoOptDefVal != "" {
+ // '--flag' (arg was optional)
+ value = flag.NoOptDefVal
+ } else if len(a) > 0 {
+ // '--flag arg'
+ value = a[0]
+ a = a[1:]
+ } else {
+ // '--flag' (arg was required)
+ err = f.failf("flag needs an argument: %s", s)
+ return
+ }
+
+ err = fn(flag, value)
+ if err != nil {
+ f.failf(err.Error())
+ }
+ return
+}
+
+func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) {
+ outArgs = args
+
+ if strings.HasPrefix(shorthands, "test.") {
+ return
+ }
+
+ outShorts = shorthands[1:]
+ c := shorthands[0]
+
+ flag, exists := f.shorthands[c]
+ if !exists {
+ switch {
+ case c == 'h':
+ f.usage()
+ err = ErrHelp
+ return
+ case f.ParseErrorsWhitelist.UnknownFlags:
+ // '-f=arg arg ...'
+ // we do not want to lose arg in this case
+ if len(shorthands) > 2 && shorthands[1] == '=' {
+ outShorts = ""
+ return
+ }
+
+ outArgs = stripUnknownFlagValue(outArgs)
+ return
+ default:
+ err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands)
+ return
+ }
+ }
+
+ var value string
+ if len(shorthands) > 2 && shorthands[1] == '=' {
+ // '-f=arg'
+ value = shorthands[2:]
+ outShorts = ""
+ } else if flag.NoOptDefVal != "" {
+ // '-f' (arg was optional)
+ value = flag.NoOptDefVal
+ } else if len(shorthands) > 1 {
+ // '-farg'
+ value = shorthands[1:]
+ outShorts = ""
+ } else if len(args) > 0 {
+ // '-f arg'
+ value = args[0]
+ outArgs = args[1:]
+ } else {
+ // '-f' (arg was required)
+ err = f.failf("flag needs an argument: %q in -%s", c, shorthands)
+ return
+ }
+
+ if flag.ShorthandDeprecated != "" {
+ fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
+ }
+
+ err = fn(flag, value)
+ if err != nil {
+ f.failf(err.Error())
+ }
+ return
+}
+
+func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) {
+ a = args
+ shorthands := s[1:]
+
+ // "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv").
+ for len(shorthands) > 0 {
+ shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn)
+ if err != nil {
+ return
+ }
+ }
+
+ return
+}
+
+func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) {
+ for len(args) > 0 {
+ s := args[0]
+ args = args[1:]
+ if len(s) == 0 || s[0] != '-' || len(s) == 1 {
+ if !f.interspersed {
+ f.args = append(f.args, s)
+ f.args = append(f.args, args...)
+ return nil
+ }
+ f.args = append(f.args, s)
+ continue
+ }
+
+ if s[1] == '-' {
+ if len(s) == 2 { // "--" terminates the flags
+ f.argsLenAtDash = len(f.args)
+ f.args = append(f.args, args...)
+ break
+ }
+ args, err = f.parseLongArg(s, args, fn)
+ } else {
+ args, err = f.parseShortArg(s, args, fn)
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// Parse parses flag definitions from the argument list, which should not
+// include the command name. Must be called after all flags in the FlagSet
+// are defined and before flags are accessed by the program.
+// The return value will be ErrHelp if -help was set but not defined.
+func (f *FlagSet) Parse(arguments []string) error {
+ if f.addedGoFlagSets != nil {
+ for _, goFlagSet := range f.addedGoFlagSets {
+ goFlagSet.Parse(nil)
+ }
+ }
+ f.parsed = true
+
+ if len(arguments) < 0 {
+ return nil
+ }
+
+ f.args = make([]string, 0, len(arguments))
+
+ set := func(flag *Flag, value string) error {
+ return f.Set(flag.Name, value)
+ }
+
+ err := f.parseArgs(arguments, set)
+ if err != nil {
+ switch f.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ fmt.Println(err)
+ os.Exit(2)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ return nil
+}
+
+type parseFunc func(flag *Flag, value string) error
+
+// ParseAll parses flag definitions from the argument list, which should not
+// include the command name. The arguments for fn are flag and value. Must be
+// called after all flags in the FlagSet are defined and before flags are
+// accessed by the program. The return value will be ErrHelp if -help was set
+// but not defined.
+func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error {
+ f.parsed = true
+ f.args = make([]string, 0, len(arguments))
+
+ err := f.parseArgs(arguments, fn)
+ if err != nil {
+ switch f.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ os.Exit(2)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ return nil
+}
+
+// Parsed reports whether f.Parse has been called.
+func (f *FlagSet) Parsed() bool {
+ return f.parsed
+}
+
+// Parse parses the command-line flags from os.Args[1:]. Must be called
+// after all flags are defined and before flags are accessed by the program.
+func Parse() {
+ // Ignore errors; CommandLine is set for ExitOnError.
+ CommandLine.Parse(os.Args[1:])
+}
+
+// ParseAll parses the command-line flags from os.Args[1:] and calls fn for each.
+// The arguments for fn are flag and value. Must be called after all flags are
+// defined and before flags are accessed by the program.
+func ParseAll(fn func(flag *Flag, value string) error) {
+ // Ignore errors; CommandLine is set for ExitOnError.
+ CommandLine.ParseAll(os.Args[1:], fn)
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func SetInterspersed(interspersed bool) {
+ CommandLine.SetInterspersed(interspersed)
+}
+
+// Parsed returns true if the command-line flags have been parsed.
+func Parsed() bool {
+ return CommandLine.Parsed()
+}
+
+// CommandLine is the default set of command-line flags, parsed from os.Args.
+var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
+
+// NewFlagSet returns a new, empty flag set with the specified name,
+// error handling property and SortFlags set to true.
+func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
+ f := &FlagSet{
+ name: name,
+ errorHandling: errorHandling,
+ argsLenAtDash: -1,
+ interspersed: true,
+ SortFlags: true,
+ }
+ return f
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func (f *FlagSet) SetInterspersed(interspersed bool) {
+ f.interspersed = interspersed
+}
+
+// Init sets the name and error handling property for a flag set.
+// By default, the zero FlagSet uses an empty name and the
+// ContinueOnError error handling policy.
+func (f *FlagSet) Init(name string, errorHandling ErrorHandling) {
+ f.name = name
+ f.errorHandling = errorHandling
+ f.argsLenAtDash = -1
+}
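Tying the flag.go machinery together, here is a minimal sketch of a standalone FlagSet showing shorthands, interspersed positional arguments, the "--" terminator, and the Changed/Args accessors (BoolP and StringP are assumed from the other typed constructors in this package):

```go
package main

import (
	"fmt"
	"os"

	pflag "github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	verbose := fs.BoolP("verbose", "v", false, "enable verbose output")
	name := fs.StringP("name", "n", "world", "name to greet")

	// Flags may be interspersed with positional arguments; "--" stops parsing.
	if err := fs.Parse([]string{"greet", "-v", "--name=gotosocial", "--", "-not-a-flag"}); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(2)
	}

	fmt.Println(*verbose, *name)    // true gotosocial
	fmt.Println(fs.Args())          // [greet -not-a-flag]
	fmt.Println(fs.Changed("name")) // true
	fmt.Println(fs.ArgsLenAtDash()) // 1
}
```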
diff --git a/vendor/github.com/spf13/pflag/float32.go b/vendor/github.com/spf13/pflag/float32.go
new file mode 100644
index 000000000..a243f81f7
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- float32 Value
+type float32Value float32
+
+func newFloat32Value(val float32, p *float32) *float32Value {
+ *p = val
+ return (*float32Value)(p)
+}
+
+func (f *float32Value) Set(s string) error {
+ v, err := strconv.ParseFloat(s, 32)
+ *f = float32Value(v)
+ return err
+}
+
+func (f *float32Value) Type() string {
+ return "float32"
+}
+
+func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) }
+
+func float32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseFloat(sval, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(v), nil
+}
+
+// GetFloat32 return the float32 value of a flag with the given name
+func (f *FlagSet) GetFloat32(name string) (float32, error) {
+ val, err := f.getFlagType(name, "float32", float32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(float32), nil
+}
+
+// Float32Var defines a float32 flag with specified name, default value, and usage string.
+// The argument p points to a float32 variable in which to store the value of the flag.
+func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) {
+ f.VarP(newFloat32Value(value, p), name, "", usage)
+}
+
+// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
+ f.VarP(newFloat32Value(value, p), name, shorthand, usage)
+}
+
+// Float32Var defines a float32 flag with specified name, default value, and usage string.
+// The argument p points to a float32 variable in which to store the value of the flag.
+func Float32Var(p *float32, name string, value float32, usage string) {
+ CommandLine.VarP(newFloat32Value(value, p), name, "", usage)
+}
+
+// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
+func Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
+ CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage)
+}
+
+// Float32 defines a float32 flag with specified name, default value, and usage string.
+// The return value is the address of a float32 variable that stores the value of the flag.
+func (f *FlagSet) Float32(name string, value float32, usage string) *float32 {
+ p := new(float32)
+ f.Float32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 {
+ p := new(float32)
+ f.Float32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Float32 defines a float32 flag with specified name, default value, and usage string.
+// The return value is the address of a float32 variable that stores the value of the flag.
+func Float32(name string, value float32, usage string) *float32 {
+ return CommandLine.Float32P(name, "", value, usage)
+}
+
+// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
+func Float32P(name, shorthand string, value float32, usage string) *float32 {
+ return CommandLine.Float32P(name, shorthand, value, usage)
+}
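Each typed constructor has a matching accessor that goes through getFlagType, so a flag's value can also be read back by name and type-checked at the same time; a sketch (GetString is assumed from the string flag file elsewhere in this package):

```go
package main

import (
	"fmt"

	pflag "github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("tuning", pflag.ContinueOnError)
	fs.Float32P("ratio", "r", 0.5, "mixing ratio")
	_ = fs.Parse([]string{"-r", "0.75"})

	// GetFloat32 verifies the flag exists and has type "float32" before
	// converting its string form back into a float32.
	ratio, err := fs.GetFloat32("ratio")
	fmt.Println(ratio, err) // 0.75 <nil>

	// Asking for the wrong type returns an error instead of a value.
	if _, err := fs.GetString("ratio"); err != nil {
		fmt.Println(err) // trying to get string value of flag of type float32
	}
}
```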
diff --git a/vendor/github.com/spf13/pflag/float32_slice.go b/vendor/github.com/spf13/pflag/float32_slice.go
new file mode 100644
index 000000000..caa352741
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float32_slice.go
@@ -0,0 +1,174 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- float32Slice Value
+type float32SliceValue struct {
+ value *[]float32
+ changed bool
+}
+
+func newFloat32SliceValue(val []float32, p *[]float32) *float32SliceValue {
+ isv := new(float32SliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *float32SliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]float32, len(ss))
+ for i, d := range ss {
+ var err error
+ var temp64 float64
+ temp64, err = strconv.ParseFloat(d, 32)
+ if err != nil {
+ return err
+ }
+ out[i] = float32(temp64)
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *float32SliceValue) Type() string {
+ return "float32Slice"
+}
+
+func (s *float32SliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%f", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *float32SliceValue) fromString(val string) (float32, error) {
+ t64, err := strconv.ParseFloat(val, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(t64), nil
+}
+
+func (s *float32SliceValue) toString(val float32) string {
+ return fmt.Sprintf("%f", val)
+}
+
+func (s *float32SliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *float32SliceValue) Replace(val []string) error {
+ out := make([]float32, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *float32SliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func float32SliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []float32{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]float32, len(ss))
+ for i, d := range ss {
+ var err error
+ var temp64 float64
+ temp64, err = strconv.ParseFloat(d, 32)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = float32(temp64)
+
+ }
+ return out, nil
+}
+
+// GetFloat32Slice return the []float32 value of a flag with the given name
+func (f *FlagSet) GetFloat32Slice(name string) ([]float32, error) {
+ val, err := f.getFlagType(name, "float32Slice", float32SliceConv)
+ if err != nil {
+ return []float32{}, err
+ }
+ return val.([]float32), nil
+}
+
+// Float32SliceVar defines a float32Slice flag with specified name, default value, and usage string.
+// The argument p points to a []float32 variable in which to store the value of the flag.
+func (f *FlagSet) Float32SliceVar(p *[]float32, name string, value []float32, usage string) {
+ f.VarP(newFloat32SliceValue(value, p), name, "", usage)
+}
+
+// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) {
+ f.VarP(newFloat32SliceValue(value, p), name, shorthand, usage)
+}
+
+// Float32SliceVar defines a float32[] flag with specified name, default value, and usage string.
+// The argument p points to a float32[] variable in which to store the value of the flag.
+func Float32SliceVar(p *[]float32, name string, value []float32, usage string) {
+ CommandLine.VarP(newFloat32SliceValue(value, p), name, "", usage)
+}
+
+// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) {
+ CommandLine.VarP(newFloat32SliceValue(value, p), name, shorthand, usage)
+}
+
+// Float32Slice defines a []float32 flag with specified name, default value, and usage string.
+// The return value is the address of a []float32 variable that stores the value of the flag.
+func (f *FlagSet) Float32Slice(name string, value []float32, usage string) *[]float32 {
+ p := []float32{}
+ f.Float32SliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 {
+ p := []float32{}
+ f.Float32SliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// Float32Slice defines a []float32 flag with specified name, default value, and usage string.
+// The return value is the address of a []float32 variable that stores the value of the flag.
+func Float32Slice(name string, value []float32, usage string) *[]float32 {
+ return CommandLine.Float32SliceP(name, "", value, usage)
+}
+
+// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash.
+func Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 {
+ return CommandLine.Float32SliceP(name, shorthand, value, usage)
+}
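Because the slice values also satisfy the SliceValue interface defined in flag.go, list flags can be manipulated programmatically without round-tripping through comma-separated strings; a sketch:

```go
package main

import (
	"fmt"

	pflag "github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("weights", pflag.ContinueOnError)
	weights := fs.Float32Slice("weight", []float32{1.0}, "layer weights")
	_ = fs.Parse([]string{"--weight=0.25,0.5"})

	// Lookup returns the *Flag; its Value can be asserted to SliceValue.
	if sv, ok := fs.Lookup("weight").Value.(pflag.SliceValue); ok {
		_ = sv.Append("0.75")      // add one more entry
		fmt.Println(sv.GetSlice()) // [0.250000 0.500000 0.750000]
	}
	fmt.Println(*weights) // [0.25 0.5 0.75]
}
```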
diff --git a/vendor/github.com/spf13/pflag/float64.go b/vendor/github.com/spf13/pflag/float64.go
new file mode 100644
index 000000000..04b5492a7
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float64.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- float64 Value
+type float64Value float64
+
+func newFloat64Value(val float64, p *float64) *float64Value {
+ *p = val
+ return (*float64Value)(p)
+}
+
+func (f *float64Value) Set(s string) error {
+ v, err := strconv.ParseFloat(s, 64)
+ *f = float64Value(v)
+ return err
+}
+
+func (f *float64Value) Type() string {
+ return "float64"
+}
+
+func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) }
+
+func float64Conv(sval string) (interface{}, error) {
+ return strconv.ParseFloat(sval, 64)
+}
+
+// GetFloat64 return the float64 value of a flag with the given name
+func (f *FlagSet) GetFloat64(name string) (float64, error) {
+ val, err := f.getFlagType(name, "float64", float64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(float64), nil
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) {
+ f.VarP(newFloat64Value(value, p), name, "", usage)
+}
+
+// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
+ f.VarP(newFloat64Value(value, p), name, shorthand, usage)
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func Float64Var(p *float64, name string, value float64, usage string) {
+ CommandLine.VarP(newFloat64Value(value, p), name, "", usage)
+}
+
+// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
+func Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
+ CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage)
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func (f *FlagSet) Float64(name string, value float64, usage string) *float64 {
+ p := new(float64)
+ f.Float64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 {
+ p := new(float64)
+ f.Float64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func Float64(name string, value float64, usage string) *float64 {
+ return CommandLine.Float64P(name, "", value, usage)
+}
+
+// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
+func Float64P(name, shorthand string, value float64, usage string) *float64 {
+ return CommandLine.Float64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/float64_slice.go b/vendor/github.com/spf13/pflag/float64_slice.go
new file mode 100644
index 000000000..85bf3073d
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float64_slice.go
@@ -0,0 +1,166 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- float64Slice Value
+type float64SliceValue struct {
+ value *[]float64
+ changed bool
+}
+
+func newFloat64SliceValue(val []float64, p *[]float64) *float64SliceValue {
+ isv := new(float64SliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *float64SliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]float64, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.ParseFloat(d, 64)
+ if err != nil {
+ return err
+ }
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *float64SliceValue) Type() string {
+ return "float64Slice"
+}
+
+func (s *float64SliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%f", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *float64SliceValue) fromString(val string) (float64, error) {
+ return strconv.ParseFloat(val, 64)
+}
+
+func (s *float64SliceValue) toString(val float64) string {
+ return fmt.Sprintf("%f", val)
+}
+
+func (s *float64SliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *float64SliceValue) Replace(val []string) error {
+ out := make([]float64, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *float64SliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func float64SliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []float64{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]float64, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.ParseFloat(d, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ return out, nil
+}
+
+// GetFloat64Slice return the []float64 value of a flag with the given name
+func (f *FlagSet) GetFloat64Slice(name string) ([]float64, error) {
+ val, err := f.getFlagType(name, "float64Slice", float64SliceConv)
+ if err != nil {
+ return []float64{}, err
+ }
+ return val.([]float64), nil
+}
+
+// Float64SliceVar defines a float64Slice flag with specified name, default value, and usage string.
+// The argument p points to a []float64 variable in which to store the value of the flag.
+func (f *FlagSet) Float64SliceVar(p *[]float64, name string, value []float64, usage string) {
+ f.VarP(newFloat64SliceValue(value, p), name, "", usage)
+}
+
+// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) {
+ f.VarP(newFloat64SliceValue(value, p), name, shorthand, usage)
+}
+
+// Float64SliceVar defines a float64[] flag with specified name, default value, and usage string.
+// The argument p points to a float64[] variable in which to store the value of the flag.
+func Float64SliceVar(p *[]float64, name string, value []float64, usage string) {
+ CommandLine.VarP(newFloat64SliceValue(value, p), name, "", usage)
+}
+
+// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) {
+ CommandLine.VarP(newFloat64SliceValue(value, p), name, shorthand, usage)
+}
+
+// Float64Slice defines a []float64 flag with specified name, default value, and usage string.
+// The return value is the address of a []float64 variable that stores the value of the flag.
+func (f *FlagSet) Float64Slice(name string, value []float64, usage string) *[]float64 {
+ p := []float64{}
+ f.Float64SliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 {
+ p := []float64{}
+ f.Float64SliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// Float64Slice defines a []float64 flag with specified name, default value, and usage string.
+// The return value is the address of a []float64 variable that stores the value of the flag.
+func Float64Slice(name string, value []float64, usage string) *[]float64 {
+ return CommandLine.Float64SliceP(name, "", value, usage)
+}
+
+// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash.
+func Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 {
+ return CommandLine.Float64SliceP(name, shorthand, value, usage)
+}
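
The file above follows pflag's common slice-flag pattern: the first `Set` (from a comma-separated argument) replaces the default, later uses of the flag append, and `GetFloat64Slice` reads the value back by name. A minimal sketch of that behaviour from a consumer's point of view; the flag name and values are purely illustrative, not part of this patch:

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	weights := fs.Float64Slice("weights", []float64{1.0}, "comma-separated weights")

	// The first use replaces the default, the second appends (see Set above).
	if err := fs.Parse([]string{"--weights=0.5,0.25", "--weights=0.125"}); err != nil {
		panic(err)
	}
	fmt.Println(*weights) // [0.5 0.25 0.125]

	// The same value can also be fetched by flag name.
	ws, _ := fs.GetFloat64Slice("weights")
	fmt.Println(ws)
}
```
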
diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go
new file mode 100644
index 000000000..d3dd72b7f
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/golangflag.go
@@ -0,0 +1,105 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+ goflag "flag"
+ "reflect"
+ "strings"
+)
+
+// flagValueWrapper implements pflag.Value around a flag.Value. The main
+// difference here is the addition of the Type method that returns a string
+// name of the type. As this is generally unknown, we approximate that with
+// reflection.
+type flagValueWrapper struct {
+ inner goflag.Value
+ flagType string
+}
+
+// We are just copying the boolFlag interface out of goflag as that is what
+// they use to decide if a flag should get "true" when no arg is given.
+type goBoolFlag interface {
+ goflag.Value
+ IsBoolFlag() bool
+}
+
+func wrapFlagValue(v goflag.Value) Value {
+ // If the flag.Value happens to also be a pflag.Value, just use it directly.
+ if pv, ok := v.(Value); ok {
+ return pv
+ }
+
+ pv := &flagValueWrapper{
+ inner: v,
+ }
+
+ t := reflect.TypeOf(v)
+ if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+
+ pv.flagType = strings.TrimSuffix(t.Name(), "Value")
+ return pv
+}
+
+func (v *flagValueWrapper) String() string {
+ return v.inner.String()
+}
+
+func (v *flagValueWrapper) Set(s string) error {
+ return v.inner.Set(s)
+}
+
+func (v *flagValueWrapper) Type() string {
+ return v.flagType
+}
+
+// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag
+// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessible
+// with both `-v` and `--v` in flags. If the golang flag was more than a single
+// character (ex: `verbose`) it will only be accessible via `--verbose`.
+func PFlagFromGoFlag(goflag *goflag.Flag) *Flag {
+ // Remember the default value as a string; it won't change.
+ flag := &Flag{
+ Name: goflag.Name,
+ Usage: goflag.Usage,
+ Value: wrapFlagValue(goflag.Value),
+ // Looks like golang flags don't set DefValue correctly :-(
+ //DefValue: goflag.DefValue,
+ DefValue: goflag.Value.String(),
+ }
+ // Ex: if the golang flag was -v, allow both -v and --v to work
+ if len(flag.Name) == 1 {
+ flag.Shorthand = flag.Name
+ }
+ if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() {
+ flag.NoOptDefVal = "true"
+ }
+ return flag
+}
+
+// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet
+func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) {
+ if f.Lookup(goflag.Name) != nil {
+ return
+ }
+ newflag := PFlagFromGoFlag(goflag)
+ f.AddFlag(newflag)
+}
+
+// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet
+func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) {
+ if newSet == nil {
+ return
+ }
+ newSet.VisitAll(func(goflag *goflag.Flag) {
+ f.AddGoFlag(goflag)
+ })
+ if f.addedGoFlagSets == nil {
+ f.addedGoFlagSets = make([]*goflag.FlagSet, 0)
+ }
+ f.addedGoFlagSets = append(f.addedGoFlagSets, newSet)
+}
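
golangflag.go is the bridge between the standard library's flag package and pflag: `AddGoFlagSet` wraps each stdlib flag in a pflag Value, gives single-character names a shorthand, and marks boolean flags so they accept no argument. A hedged sketch of how that bridge behaves in isolation; the `verbose` flag is invented for illustration:

```go
package main

import (
	goflag "flag"
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	// A flag registered with the standard library "flag" package.
	verbose := goflag.Bool("verbose", false, "enable verbose output")

	// Absorb the whole stdlib flag set into a pflag set.
	pfs := flag.NewFlagSet("demo", flag.ContinueOnError)
	pfs.AddGoFlagSet(goflag.CommandLine)

	// Because the wrapped flag is boolean, "--verbose" needs no value.
	if err := pfs.Parse([]string{"--verbose"}); err != nil {
		panic(err)
	}
	fmt.Println(*verbose) // true
}
```
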
diff --git a/vendor/github.com/spf13/pflag/int.go b/vendor/github.com/spf13/pflag/int.go
new file mode 100644
index 000000000..1474b89df
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- int Value
+type intValue int
+
+func newIntValue(val int, p *int) *intValue {
+ *p = val
+ return (*intValue)(p)
+}
+
+func (i *intValue) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *i = intValue(v)
+ return err
+}
+
+func (i *intValue) Type() string {
+ return "int"
+}
+
+func (i *intValue) String() string { return strconv.Itoa(int(*i)) }
+
+func intConv(sval string) (interface{}, error) {
+ return strconv.Atoi(sval)
+}
+
+// GetInt returns the int value of a flag with the given name
+func (f *FlagSet) GetInt(name string) (int, error) {
+ val, err := f.getFlagType(name, "int", intConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int), nil
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func (f *FlagSet) IntVar(p *int, name string, value int, usage string) {
+ f.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) {
+ f.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func IntVar(p *int, name string, value int, usage string) {
+ CommandLine.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func IntVarP(p *int, name, shorthand string, value int, usage string) {
+ CommandLine.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func (f *FlagSet) Int(name string, value int, usage string) *int {
+ p := new(int)
+ f.IntVarP(p, name, "", value, usage)
+ return p
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int {
+ p := new(int)
+ f.IntVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func Int(name string, value int, usage string) *int {
+ return CommandLine.IntP(name, "", value, usage)
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func IntP(name, shorthand string, value int, usage string) *int {
+ return CommandLine.IntP(name, shorthand, value, usage)
+}
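
int.go is the template that the remaining numeric files repeat almost verbatim: a typed wrapper around the value, `IntVar`/`IntVarP` to bind an existing variable, `Int`/`IntP` to allocate one, `GetInt` for lookup by name, and package-level twins that operate on `CommandLine`. A small sketch with invented flag names:

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)

	// Bind to an existing variable, with a -p shorthand.
	var port int
	fs.IntVarP(&port, "port", "p", 8080, "port to listen on")

	// Or let pflag allocate the variable and return a pointer.
	retries := fs.Int("retries", 3, "number of retries")

	if err := fs.Parse([]string{"-p", "9000"}); err != nil {
		panic(err)
	}
	fmt.Println(port, *retries) // 9000 3

	// Lookup by name goes through the flag's declared type ("int").
	if p, err := fs.GetInt("port"); err == nil {
		fmt.Println(p) // 9000
	}
}
```
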
diff --git a/vendor/github.com/spf13/pflag/int16.go b/vendor/github.com/spf13/pflag/int16.go
new file mode 100644
index 000000000..f1a01d05e
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int16.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int16 Value
+type int16Value int16
+
+func newInt16Value(val int16, p *int16) *int16Value {
+ *p = val
+ return (*int16Value)(p)
+}
+
+func (i *int16Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 16)
+ *i = int16Value(v)
+ return err
+}
+
+func (i *int16Value) Type() string {
+ return "int16"
+}
+
+func (i *int16Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int16Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 16)
+ if err != nil {
+ return 0, err
+ }
+ return int16(v), nil
+}
+
+// GetInt16 returns the int16 value of a flag with the given name
+func (f *FlagSet) GetInt16(name string) (int16, error) {
+ val, err := f.getFlagType(name, "int16", int16Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int16), nil
+}
+
+// Int16Var defines an int16 flag with specified name, default value, and usage string.
+// The argument p points to an int16 variable in which to store the value of the flag.
+func (f *FlagSet) Int16Var(p *int16, name string, value int16, usage string) {
+ f.VarP(newInt16Value(value, p), name, "", usage)
+}
+
+// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
+ f.VarP(newInt16Value(value, p), name, shorthand, usage)
+}
+
+// Int16Var defines an int16 flag with specified name, default value, and usage string.
+// The argument p points to an int16 variable in which to store the value of the flag.
+func Int16Var(p *int16, name string, value int16, usage string) {
+ CommandLine.VarP(newInt16Value(value, p), name, "", usage)
+}
+
+// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
+func Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
+ CommandLine.VarP(newInt16Value(value, p), name, shorthand, usage)
+}
+
+// Int16 defines an int16 flag with specified name, default value, and usage string.
+// The return value is the address of an int16 variable that stores the value of the flag.
+func (f *FlagSet) Int16(name string, value int16, usage string) *int16 {
+ p := new(int16)
+ f.Int16VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int16P(name, shorthand string, value int16, usage string) *int16 {
+ p := new(int16)
+ f.Int16VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int16 defines an int16 flag with specified name, default value, and usage string.
+// The return value is the address of an int16 variable that stores the value of the flag.
+func Int16(name string, value int16, usage string) *int16 {
+ return CommandLine.Int16P(name, "", value, usage)
+}
+
+// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
+func Int16P(name, shorthand string, value int16, usage string) *int16 {
+ return CommandLine.Int16P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int32.go b/vendor/github.com/spf13/pflag/int32.go
new file mode 100644
index 000000000..9b95944f0
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int32 Value
+type int32Value int32
+
+func newInt32Value(val int32, p *int32) *int32Value {
+ *p = val
+ return (*int32Value)(p)
+}
+
+func (i *int32Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 32)
+ *i = int32Value(v)
+ return err
+}
+
+func (i *int32Value) Type() string {
+ return "int32"
+}
+
+func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(v), nil
+}
+
+// GetInt32 returns the int32 value of a flag with the given name
+func (f *FlagSet) GetInt32(name string) (int32, error) {
+ val, err := f.getFlagType(name, "int32", int32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int32), nil
+}
+
+// Int32Var defines an int32 flag with specified name, default value, and usage string.
+// The argument p points to an int32 variable in which to store the value of the flag.
+func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) {
+ f.VarP(newInt32Value(value, p), name, "", usage)
+}
+
+// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
+ f.VarP(newInt32Value(value, p), name, shorthand, usage)
+}
+
+// Int32Var defines an int32 flag with specified name, default value, and usage string.
+// The argument p points to an int32 variable in which to store the value of the flag.
+func Int32Var(p *int32, name string, value int32, usage string) {
+ CommandLine.VarP(newInt32Value(value, p), name, "", usage)
+}
+
+// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
+func Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
+ CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage)
+}
+
+// Int32 defines an int32 flag with specified name, default value, and usage string.
+// The return value is the address of an int32 variable that stores the value of the flag.
+func (f *FlagSet) Int32(name string, value int32, usage string) *int32 {
+ p := new(int32)
+ f.Int32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 {
+ p := new(int32)
+ f.Int32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int32 defines an int32 flag with specified name, default value, and usage string.
+// The return value is the address of an int32 variable that stores the value of the flag.
+func Int32(name string, value int32, usage string) *int32 {
+ return CommandLine.Int32P(name, "", value, usage)
+}
+
+// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
+func Int32P(name, shorthand string, value int32, usage string) *int32 {
+ return CommandLine.Int32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int32_slice.go b/vendor/github.com/spf13/pflag/int32_slice.go
new file mode 100644
index 000000000..ff128ff06
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int32_slice.go
@@ -0,0 +1,174 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- int32Slice Value
+type int32SliceValue struct {
+ value *[]int32
+ changed bool
+}
+
+func newInt32SliceValue(val []int32, p *[]int32) *int32SliceValue {
+ isv := new(int32SliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *int32SliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]int32, len(ss))
+ for i, d := range ss {
+ var err error
+ var temp64 int64
+ temp64, err = strconv.ParseInt(d, 0, 32)
+ if err != nil {
+ return err
+ }
+ out[i] = int32(temp64)
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *int32SliceValue) Type() string {
+ return "int32Slice"
+}
+
+func (s *int32SliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *int32SliceValue) fromString(val string) (int32, error) {
+ t64, err := strconv.ParseInt(val, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(t64), nil
+}
+
+func (s *int32SliceValue) toString(val int32) string {
+ return fmt.Sprintf("%d", val)
+}
+
+func (s *int32SliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *int32SliceValue) Replace(val []string) error {
+ out := make([]int32, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *int32SliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func int32SliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []int32{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]int32, len(ss))
+ for i, d := range ss {
+ var err error
+ var temp64 int64
+ temp64, err = strconv.ParseInt(d, 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = int32(temp64)
+
+ }
+ return out, nil
+}
+
+// GetInt32Slice returns the []int32 value of a flag with the given name
+func (f *FlagSet) GetInt32Slice(name string) ([]int32, error) {
+ val, err := f.getFlagType(name, "int32Slice", int32SliceConv)
+ if err != nil {
+ return []int32{}, err
+ }
+ return val.([]int32), nil
+}
+
+// Int32SliceVar defines a int32Slice flag with specified name, default value, and usage string.
+// The argument p points to a []int32 variable in which to store the value of the flag.
+func (f *FlagSet) Int32SliceVar(p *[]int32, name string, value []int32, usage string) {
+ f.VarP(newInt32SliceValue(value, p), name, "", usage)
+}
+
+// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) {
+ f.VarP(newInt32SliceValue(value, p), name, shorthand, usage)
+}
+
+// Int32SliceVar defines a int32[] flag with specified name, default value, and usage string.
+// The argument p points to a int32[] variable in which to store the value of the flag.
+func Int32SliceVar(p *[]int32, name string, value []int32, usage string) {
+ CommandLine.VarP(newInt32SliceValue(value, p), name, "", usage)
+}
+
+// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) {
+ CommandLine.VarP(newInt32SliceValue(value, p), name, shorthand, usage)
+}
+
+// Int32Slice defines a []int32 flag with specified name, default value, and usage string.
+// The return value is the address of a []int32 variable that stores the value of the flag.
+func (f *FlagSet) Int32Slice(name string, value []int32, usage string) *[]int32 {
+ p := []int32{}
+ f.Int32SliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 {
+ p := []int32{}
+ f.Int32SliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// Int32Slice defines a []int32 flag with specified name, default value, and usage string.
+// The return value is the address of a []int32 variable that stores the value of the flag.
+func Int32Slice(name string, value []int32, usage string) *[]int32 {
+ return CommandLine.Int32SliceP(name, "", value, usage)
+}
+
+// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash.
+func Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 {
+ return CommandLine.Int32SliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int64.go b/vendor/github.com/spf13/pflag/int64.go
new file mode 100644
index 000000000..0026d781d
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int64.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- int64 Value
+type int64Value int64
+
+func newInt64Value(val int64, p *int64) *int64Value {
+ *p = val
+ return (*int64Value)(p)
+}
+
+func (i *int64Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *i = int64Value(v)
+ return err
+}
+
+func (i *int64Value) Type() string {
+ return "int64"
+}
+
+func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int64Conv(sval string) (interface{}, error) {
+ return strconv.ParseInt(sval, 0, 64)
+}
+
+// GetInt64 returns the int64 value of a flag with the given name
+func (f *FlagSet) GetInt64(name string) (int64, error) {
+ val, err := f.getFlagType(name, "int64", int64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int64), nil
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) {
+ f.VarP(newInt64Value(value, p), name, "", usage)
+}
+
+// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
+ f.VarP(newInt64Value(value, p), name, shorthand, usage)
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func Int64Var(p *int64, name string, value int64, usage string) {
+ CommandLine.VarP(newInt64Value(value, p), name, "", usage)
+}
+
+// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
+func Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
+ CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage)
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func (f *FlagSet) Int64(name string, value int64, usage string) *int64 {
+ p := new(int64)
+ f.Int64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 {
+ p := new(int64)
+ f.Int64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func Int64(name string, value int64, usage string) *int64 {
+ return CommandLine.Int64P(name, "", value, usage)
+}
+
+// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
+func Int64P(name, shorthand string, value int64, usage string) *int64 {
+ return CommandLine.Int64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int64_slice.go b/vendor/github.com/spf13/pflag/int64_slice.go
new file mode 100644
index 000000000..25464638f
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int64_slice.go
@@ -0,0 +1,166 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- int64Slice Value
+type int64SliceValue struct {
+ value *[]int64
+ changed bool
+}
+
+func newInt64SliceValue(val []int64, p *[]int64) *int64SliceValue {
+ isv := new(int64SliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *int64SliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]int64, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.ParseInt(d, 0, 64)
+ if err != nil {
+ return err
+ }
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *int64SliceValue) Type() string {
+ return "int64Slice"
+}
+
+func (s *int64SliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *int64SliceValue) fromString(val string) (int64, error) {
+ return strconv.ParseInt(val, 0, 64)
+}
+
+func (s *int64SliceValue) toString(val int64) string {
+ return fmt.Sprintf("%d", val)
+}
+
+func (s *int64SliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *int64SliceValue) Replace(val []string) error {
+ out := make([]int64, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *int64SliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func int64SliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []int64{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]int64, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.ParseInt(d, 0, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ return out, nil
+}
+
+// GetInt64Slice returns the []int64 value of a flag with the given name
+func (f *FlagSet) GetInt64Slice(name string) ([]int64, error) {
+ val, err := f.getFlagType(name, "int64Slice", int64SliceConv)
+ if err != nil {
+ return []int64{}, err
+ }
+ return val.([]int64), nil
+}
+
+// Int64SliceVar defines a int64Slice flag with specified name, default value, and usage string.
+// The argument p points to a []int64 variable in which to store the value of the flag.
+func (f *FlagSet) Int64SliceVar(p *[]int64, name string, value []int64, usage string) {
+ f.VarP(newInt64SliceValue(value, p), name, "", usage)
+}
+
+// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) {
+ f.VarP(newInt64SliceValue(value, p), name, shorthand, usage)
+}
+
+// Int64SliceVar defines a int64[] flag with specified name, default value, and usage string.
+// The argument p points to a int64[] variable in which to store the value of the flag.
+func Int64SliceVar(p *[]int64, name string, value []int64, usage string) {
+ CommandLine.VarP(newInt64SliceValue(value, p), name, "", usage)
+}
+
+// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) {
+ CommandLine.VarP(newInt64SliceValue(value, p), name, shorthand, usage)
+}
+
+// Int64Slice defines a []int64 flag with specified name, default value, and usage string.
+// The return value is the address of a []int64 variable that stores the value of the flag.
+func (f *FlagSet) Int64Slice(name string, value []int64, usage string) *[]int64 {
+ p := []int64{}
+ f.Int64SliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 {
+ p := []int64{}
+ f.Int64SliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// Int64Slice defines a []int64 flag with specified name, default value, and usage string.
+// The return value is the address of a []int64 variable that stores the value of the flag.
+func Int64Slice(name string, value []int64, usage string) *[]int64 {
+ return CommandLine.Int64SliceP(name, "", value, usage)
+}
+
+// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash.
+func Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 {
+ return CommandLine.Int64SliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int8.go b/vendor/github.com/spf13/pflag/int8.go
new file mode 100644
index 000000000..4da92228e
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int8.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int8 Value
+type int8Value int8
+
+func newInt8Value(val int8, p *int8) *int8Value {
+ *p = val
+ return (*int8Value)(p)
+}
+
+func (i *int8Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 8)
+ *i = int8Value(v)
+ return err
+}
+
+func (i *int8Value) Type() string {
+ return "int8"
+}
+
+func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int8Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 8)
+ if err != nil {
+ return 0, err
+ }
+ return int8(v), nil
+}
+
+// GetInt8 returns the int8 value of a flag with the given name
+func (f *FlagSet) GetInt8(name string) (int8, error) {
+ val, err := f.getFlagType(name, "int8", int8Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int8), nil
+}
+
+// Int8Var defines an int8 flag with specified name, default value, and usage string.
+// The argument p points to an int8 variable in which to store the value of the flag.
+func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) {
+ f.VarP(newInt8Value(value, p), name, "", usage)
+}
+
+// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
+ f.VarP(newInt8Value(value, p), name, shorthand, usage)
+}
+
+// Int8Var defines an int8 flag with specified name, default value, and usage string.
+// The argument p points to an int8 variable in which to store the value of the flag.
+func Int8Var(p *int8, name string, value int8, usage string) {
+ CommandLine.VarP(newInt8Value(value, p), name, "", usage)
+}
+
+// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
+func Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
+ CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage)
+}
+
+// Int8 defines an int8 flag with specified name, default value, and usage string.
+// The return value is the address of an int8 variable that stores the value of the flag.
+func (f *FlagSet) Int8(name string, value int8, usage string) *int8 {
+ p := new(int8)
+ f.Int8VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 {
+ p := new(int8)
+ f.Int8VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int8 defines an int8 flag with specified name, default value, and usage string.
+// The return value is the address of an int8 variable that stores the value of the flag.
+func Int8(name string, value int8, usage string) *int8 {
+ return CommandLine.Int8P(name, "", value, usage)
+}
+
+// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
+func Int8P(name, shorthand string, value int8, usage string) *int8 {
+ return CommandLine.Int8P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go
new file mode 100644
index 000000000..e71c39d91
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int_slice.go
@@ -0,0 +1,158 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- intSlice Value
+type intSliceValue struct {
+ value *[]int
+ changed bool
+}
+
+func newIntSliceValue(val []int, p *[]int) *intSliceValue {
+ isv := new(intSliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *intSliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]int, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.Atoi(d)
+ if err != nil {
+ return err
+ }
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *intSliceValue) Type() string {
+ return "intSlice"
+}
+
+func (s *intSliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *intSliceValue) Append(val string) error {
+ i, err := strconv.Atoi(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *intSliceValue) Replace(val []string) error {
+ out := make([]int, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = strconv.Atoi(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *intSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = strconv.Itoa(d)
+ }
+ return out
+}
+
+func intSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []int{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]int, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.Atoi(d)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ return out, nil
+}
+
+// GetIntSlice returns the []int value of a flag with the given name
+func (f *FlagSet) GetIntSlice(name string) ([]int, error) {
+ val, err := f.getFlagType(name, "intSlice", intSliceConv)
+ if err != nil {
+ return []int{}, err
+ }
+ return val.([]int), nil
+}
+
+// IntSliceVar defines a intSlice flag with specified name, default value, and usage string.
+// The argument p points to a []int variable in which to store the value of the flag.
+func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) {
+ f.VarP(newIntSliceValue(value, p), name, "", usage)
+}
+
+// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
+ f.VarP(newIntSliceValue(value, p), name, shorthand, usage)
+}
+
+// IntSliceVar defines a int[] flag with specified name, default value, and usage string.
+// The argument p points to a int[] variable in which to store the value of the flag.
+func IntSliceVar(p *[]int, name string, value []int, usage string) {
+ CommandLine.VarP(newIntSliceValue(value, p), name, "", usage)
+}
+
+// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
+ CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage)
+}
+
+// IntSlice defines a []int flag with specified name, default value, and usage string.
+// The return value is the address of a []int variable that stores the value of the flag.
+func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int {
+ p := []int{}
+ f.IntSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int {
+ p := []int{}
+ f.IntSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// IntSlice defines a []int flag with specified name, default value, and usage string.
+// The return value is the address of a []int variable that stores the value of the flag.
+func IntSlice(name string, value []int, usage string) *[]int {
+ return CommandLine.IntSliceP(name, "", value, usage)
+}
+
+// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
+func IntSliceP(name, shorthand string, value []int, usage string) *[]int {
+ return CommandLine.IntSliceP(name, shorthand, value, usage)
+}
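
Besides `Set`, every slice type above also implements `Append`, `Replace` and `GetSlice`; upstream pflag groups these under its `SliceValue` interface (assumed here, it is not part of this hunk), which lets callers adjust a slice flag programmatically without re-parsing a comma-separated string. A sketch under that assumption, with an invented flag:

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.IntSlice("ports", []int{80}, "ports to bind")

	// Assumes pflag's SliceValue interface (Append/Replace/GetSlice).
	if sv, ok := fs.Lookup("ports").Value.(flag.SliceValue); ok {
		_ = sv.Append("443")
		fmt.Println(sv.GetSlice()) // [80 443]

		_ = sv.Replace([]string{"8080", "8443"})
		fmt.Println(sv.GetSlice()) // [8080 8443]
	}
}
```
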
diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go
new file mode 100644
index 000000000..3d414ba69
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip.go
@@ -0,0 +1,94 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+// -- net.IP value
+type ipValue net.IP
+
+func newIPValue(val net.IP, p *net.IP) *ipValue {
+ *p = val
+ return (*ipValue)(p)
+}
+
+func (i *ipValue) String() string { return net.IP(*i).String() }
+func (i *ipValue) Set(s string) error {
+ ip := net.ParseIP(strings.TrimSpace(s))
+ if ip == nil {
+ return fmt.Errorf("failed to parse IP: %q", s)
+ }
+ *i = ipValue(ip)
+ return nil
+}
+
+func (i *ipValue) Type() string {
+ return "ip"
+}
+
+func ipConv(sval string) (interface{}, error) {
+ ip := net.ParseIP(sval)
+ if ip != nil {
+ return ip, nil
+ }
+ return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
+}
+
+// GetIP returns the net.IP value of a flag with the given name
+func (f *FlagSet) GetIP(name string) (net.IP, error) {
+ val, err := f.getFlagType(name, "ip", ipConv)
+ if err != nil {
+ return nil, err
+ }
+ return val.(net.IP), nil
+}
+
+// IPVar defines an net.IP flag with specified name, default value, and usage string.
+// The argument p points to an net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) {
+ f.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+ f.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IPVar defines an net.IP flag with specified name, default value, and usage string.
+// The argument p points to an net.IP variable in which to store the value of the flag.
+func IPVar(p *net.IP, name string, value net.IP, usage string) {
+ CommandLine.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+ CommandLine.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IP defines an net.IP flag with specified name, default value, and usage string.
+// The return value is the address of an net.IP variable that stores the value of the flag.
+func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP {
+ p := new(net.IP)
+ f.IPVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+ p := new(net.IP)
+ f.IPVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IP defines an net.IP flag with specified name, default value, and usage string.
+// The return value is the address of an net.IP variable that stores the value of the flag.
+func IP(name string, value net.IP, usage string) *net.IP {
+ return CommandLine.IPP(name, "", value, usage)
+}
+
+// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+ return CommandLine.IPP(name, shorthand, value, usage)
+}
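
ip.go plugs net.ParseIP into the same machinery, so a malformed address is rejected when the flag is set rather than surfacing later. A short sketch; the flag name and addresses are illustrative only:

```go
package main

import (
	"fmt"
	"net"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	bindAddr := fs.IP("bind-address", net.ParseIP("127.0.0.1"), "address to bind to")

	// A well-formed address (IPv4 or IPv6) parses normally...
	if err := fs.Parse([]string{"--bind-address=::1"}); err != nil {
		panic(err)
	}
	fmt.Println(*bindAddr) // ::1

	// ...while a malformed one is rejected by ipValue.Set.
	err := fs.Set("bind-address", "not-an-ip")
	fmt.Println(err) // failed to parse IP: "not-an-ip"
}
```
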
diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go
new file mode 100644
index 000000000..775faae4f
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip_slice.go
@@ -0,0 +1,186 @@
+package pflag
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "strings"
+)
+
+// -- ipSlice Value
+type ipSliceValue struct {
+ value *[]net.IP
+ changed bool
+}
+
+func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue {
+ ipsv := new(ipSliceValue)
+ ipsv.value = p
+ *ipsv.value = val
+ return ipsv
+}
+
+// Set converts, and assigns, the comma-separated IP argument string representation as the []net.IP value of this flag.
+// If Set is called on a flag that already has a []net.IP assigned, the newly converted values will be appended.
+func (s *ipSliceValue) Set(val string) error {
+
+ // remove all quote characters
+ rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+ // read flag arguments with CSV parser
+ ipStrSlice, err := readAsCSV(rmQuote.Replace(val))
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ // parse ip values into slice
+ out := make([]net.IP, 0, len(ipStrSlice))
+ for _, ipStr := range ipStrSlice {
+ ip := net.ParseIP(strings.TrimSpace(ipStr))
+ if ip == nil {
+ return fmt.Errorf("invalid string being converted to IP address: %s", ipStr)
+ }
+ out = append(out, ip)
+ }
+
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+
+ s.changed = true
+
+ return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *ipSliceValue) Type() string {
+ return "ipSlice"
+}
+
+// String defines a "native" format for this net.IP slice flag value.
+func (s *ipSliceValue) String() string {
+
+ ipStrSlice := make([]string, len(*s.value))
+ for i, ip := range *s.value {
+ ipStrSlice[i] = ip.String()
+ }
+
+ out, _ := writeAsCSV(ipStrSlice)
+
+ return "[" + out + "]"
+}
+
+func (s *ipSliceValue) fromString(val string) (net.IP, error) {
+ return net.ParseIP(strings.TrimSpace(val)), nil
+}
+
+func (s *ipSliceValue) toString(val net.IP) string {
+ return val.String()
+}
+
+func (s *ipSliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *ipSliceValue) Replace(val []string) error {
+ out := make([]net.IP, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *ipSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func ipSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []net.IP{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]net.IP, len(ss))
+ for i, sval := range ss {
+ ip := net.ParseIP(strings.TrimSpace(sval))
+ if ip == nil {
+ return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
+ }
+ out[i] = ip
+ }
+ return out, nil
+}
+
+// GetIPSlice returns the []net.IP value of a flag with the given name
+func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) {
+ val, err := f.getFlagType(name, "ipSlice", ipSliceConv)
+ if err != nil {
+ return []net.IP{}, err
+ }
+ return val.([]net.IP), nil
+}
+
+// IPSliceVar defines a ipSlice flag with specified name, default value, and usage string.
+// The argument p points to a []net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
+ f.VarP(newIPSliceValue(value, p), name, "", usage)
+}
+
+// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
+ f.VarP(newIPSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string.
+// The argument p points to a []net.IP variable in which to store the value of the flag.
+func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
+ CommandLine.VarP(newIPSliceValue(value, p), name, "", usage)
+}
+
+// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
+ CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IP variable that stores the value of that flag.
+func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP {
+ p := []net.IP{}
+ f.IPSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
+ p := []net.IP{}
+ f.IPSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IP variable that stores the value of the flag.
+func IPSlice(name string, value []net.IP, usage string) *[]net.IP {
+ return CommandLine.IPSliceP(name, "", value, usage)
+}
+
+// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
+func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
+ return CommandLine.IPSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/ipmask.go b/vendor/github.com/spf13/pflag/ipmask.go
new file mode 100644
index 000000000..5bd44bd21
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ipmask.go
@@ -0,0 +1,122 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+)
+
+// -- net.IPMask value
+type ipMaskValue net.IPMask
+
+func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue {
+ *p = val
+ return (*ipMaskValue)(p)
+}
+
+func (i *ipMaskValue) String() string { return net.IPMask(*i).String() }
+func (i *ipMaskValue) Set(s string) error {
+ ip := ParseIPv4Mask(s)
+ if ip == nil {
+ return fmt.Errorf("failed to parse IP mask: %q", s)
+ }
+ *i = ipMaskValue(ip)
+ return nil
+}
+
+func (i *ipMaskValue) Type() string {
+ return "ipMask"
+}
+
+// ParseIPv4Mask parses an IPv4 netmask written either in IP form (e.g. 255.255.255.0)
+// or in the hex form produced by net.IPMask.String() (e.g. ffffff00).
+// This function should really belong to the net package.
+func ParseIPv4Mask(s string) net.IPMask {
+ mask := net.ParseIP(s)
+ if mask == nil {
+ if len(s) != 8 {
+ return nil
+ }
+ // net.IPMask.String() actually outputs things like ffffff00
+ // so write a horrible parser for that as well :-(
+ m := []int{}
+ for i := 0; i < 4; i++ {
+ b := "0x" + s[2*i:2*i+2]
+ d, err := strconv.ParseInt(b, 0, 0)
+ if err != nil {
+ return nil
+ }
+ m = append(m, int(d))
+ }
+ s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3])
+ mask = net.ParseIP(s)
+ if mask == nil {
+ return nil
+ }
+ }
+ return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15])
+}
+
+func parseIPv4Mask(sval string) (interface{}, error) {
+ mask := ParseIPv4Mask(sval)
+ if mask == nil {
+ return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval)
+ }
+ return mask, nil
+}
+
+// GetIPv4Mask returns the net.IPMask value of a flag with the given name
+func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) {
+ val, err := f.getFlagType(name, "ipMask", parseIPv4Mask)
+ if err != nil {
+ return nil, err
+ }
+ return val.(net.IPMask), nil
+}
+
+// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to an net.IPMask variable in which to store the value of the flag.
+func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+ f.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+ f.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to an net.IPMask variable in which to store the value of the flag.
+func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+ CommandLine.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+ CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMask defines an net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of an net.IPMask variable that stores the value of the flag.
+func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+ p := new(net.IPMask)
+ f.IPMaskVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+ p := new(net.IPMask)
+ f.IPMaskVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IPMask defines an net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of an net.IPMask variable that stores the value of the flag.
+func IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+ return CommandLine.IPMaskP(name, "", value, usage)
+}
+
+// IPMaskP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+ return CommandLine.IPMaskP(name, shorthand, value, usage)
+}
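
ParseIPv4Mask is exported and accepts a netmask in either of the two shapes mentioned in its comment: dotted-quad form, or the 8-character hex string that net.IPMask.String() emits. A quick illustration (values are arbitrary):

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	// Dotted-quad form.
	fmt.Println(flag.ParseIPv4Mask("255.255.255.0")) // ffffff00

	// Hex form, as printed by net.IPMask.String().
	fmt.Println(flag.ParseIPv4Mask("ffffff00")) // ffffff00

	// Anything else comes back nil.
	fmt.Println(flag.ParseIPv4Mask("bogus")) // <nil>
}
```
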
diff --git a/vendor/github.com/spf13/pflag/ipnet.go b/vendor/github.com/spf13/pflag/ipnet.go
new file mode 100644
index 000000000..e2c1b8bcd
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ipnet.go
@@ -0,0 +1,98 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+// IPNet adapts net.IPNet for use as a flag.
+type ipNetValue net.IPNet
+
+func (ipnet ipNetValue) String() string {
+ n := net.IPNet(ipnet)
+ return n.String()
+}
+
+func (ipnet *ipNetValue) Set(value string) error {
+ _, n, err := net.ParseCIDR(strings.TrimSpace(value))
+ if err != nil {
+ return err
+ }
+ *ipnet = ipNetValue(*n)
+ return nil
+}
+
+func (*ipNetValue) Type() string {
+ return "ipNet"
+}
+
+func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue {
+ *p = val
+ return (*ipNetValue)(p)
+}
+
+func ipNetConv(sval string) (interface{}, error) {
+ _, n, err := net.ParseCIDR(strings.TrimSpace(sval))
+ if err == nil {
+ return *n, nil
+ }
+ return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval)
+}
+
+// GetIPNet returns the net.IPNet value of a flag with the given name
+func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) {
+ val, err := f.getFlagType(name, "ipNet", ipNetConv)
+ if err != nil {
+ return net.IPNet{}, err
+ }
+ return val.(net.IPNet), nil
+}
+
+// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to an net.IPNet variable in which to store the value of the flag.
+func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+ f.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+ f.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to an net.IPNet variable in which to store the value of the flag.
+func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+ CommandLine.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+ CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNet defines an net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of an net.IPNet variable that stores the value of the flag.
+func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+ p := new(net.IPNet)
+ f.IPNetVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+ p := new(net.IPNet)
+ f.IPNetVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IPNet defines an net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of an net.IPNet variable that stores the value of the flag.
+func IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+ return CommandLine.IPNetP(name, "", value, usage)
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+ return CommandLine.IPNetP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string.go b/vendor/github.com/spf13/pflag/string.go
new file mode 100644
index 000000000..04e0a26ff
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string.go
@@ -0,0 +1,80 @@
+package pflag
+
+// -- string Value
+type stringValue string
+
+func newStringValue(val string, p *string) *stringValue {
+ *p = val
+ return (*stringValue)(p)
+}
+
+func (s *stringValue) Set(val string) error {
+ *s = stringValue(val)
+ return nil
+}
+func (s *stringValue) Type() string {
+ return "string"
+}
+
+func (s *stringValue) String() string { return string(*s) }
+
+func stringConv(sval string) (interface{}, error) {
+ return sval, nil
+}
+
+// GetString returns the string value of a flag with the given name
+func (f *FlagSet) GetString(name string) (string, error) {
+ val, err := f.getFlagType(name, "string", stringConv)
+ if err != nil {
+ return "", err
+ }
+ return val.(string), nil
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func (f *FlagSet) StringVar(p *string, name string, value string, usage string) {
+ f.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) {
+ f.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func StringVar(p *string, name string, value string, usage string) {
+ CommandLine.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func StringVarP(p *string, name, shorthand string, value string, usage string) {
+ CommandLine.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func (f *FlagSet) String(name string, value string, usage string) *string {
+ p := new(string)
+ f.StringVarP(p, name, "", value, usage)
+ return p
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string {
+ p := new(string)
+ f.StringVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func String(name string, value string, usage string) *string {
+ return CommandLine.StringP(name, "", value, usage)
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func StringP(name, shorthand string, value string, usage string) *string {
+ return CommandLine.StringP(name, shorthand, value, usage)
+}
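
string.go is the simplest value type, and the string_array.go file that follows deliberately keeps each argument verbatim — its doc comments point to StringSlice (part of pflag, but not in this hunk) when comma-splitting and appending are wanted. A sketch of the practical difference, with invented flag names:

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	arr := fs.StringArray("header", nil, "repeatable; value kept verbatim")
	sl := fs.StringSlice("tags", nil, "comma-separated and/or repeatable") // assumed pflag API

	args := []string{
		"--header", "Accept: text/html,application/json", // one element, comma preserved
		"--tags", "fediverse,gotosocial", // split into two elements
		"--tags", "golang", // appended
	}
	if err := fs.Parse(args); err != nil {
		panic(err)
	}
	fmt.Println(*arr) // [Accept: text/html,application/json]
	fmt.Println(*sl)  // [fediverse gotosocial golang]
}
```
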
diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go
new file mode 100644
index 000000000..4894af818
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_array.go
@@ -0,0 +1,129 @@
+package pflag
+
+// -- stringArray Value
+type stringArrayValue struct {
+ value *[]string
+ changed bool
+}
+
+func newStringArrayValue(val []string, p *[]string) *stringArrayValue {
+ ssv := new(stringArrayValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+func (s *stringArrayValue) Set(val string) error {
+ if !s.changed {
+ *s.value = []string{val}
+ s.changed = true
+ } else {
+ *s.value = append(*s.value, val)
+ }
+ return nil
+}
+
+func (s *stringArrayValue) Append(val string) error {
+ *s.value = append(*s.value, val)
+ return nil
+}
+
+func (s *stringArrayValue) Replace(val []string) error {
+ out := make([]string, len(val))
+ for i, d := range val {
+ var err error
+ out[i] = d
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *stringArrayValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = d
+ }
+ return out
+}
+
+func (s *stringArrayValue) Type() string {
+ return "stringArray"
+}
+
+func (s *stringArrayValue) String() string {
+ str, _ := writeAsCSV(*s.value)
+ return "[" + str + "]"
+}
+
+func stringArrayConv(sval string) (interface{}, error) {
+ sval = sval[1 : len(sval)-1]
+ // An empty string would cause an array with one (empty) string
+ if len(sval) == 0 {
+ return []string{}, nil
+ }
+ return readAsCSV(sval)
+}
+
+// GetStringArray returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringArray(name string) ([]string, error) {
+ val, err := f.getFlagType(name, "stringArray", stringArrayConv)
+ if err != nil {
+ return []string{}, err
+ }
+ return val.([]string), nil
+}
+
+// StringArrayVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the values of the multiple flags.
+// The value of each argument will not try to be separated by comma. Use a StringSlice for that.
+func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) {
+ f.VarP(newStringArrayValue(value, p), name, "", usage)
+}
+
+// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ f.VarP(newStringArrayValue(value, p), name, shorthand, usage)
+}
+
+// StringArrayVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// The value of each argument will not try to be separated by comma. Use a StringSlice for that.
+func StringArrayVar(p *[]string, name string, value []string, usage string) {
+ CommandLine.VarP(newStringArrayValue(value, p), name, "", usage)
+}
+
+// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
+func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage)
+}
+
+// StringArray defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma. Use a StringSlice for that.
+func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringArrayVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringArrayVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringArray defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma. Use a StringSlice for that.
+func StringArray(name string, value []string, usage string) *[]string {
+ return CommandLine.StringArrayP(name, "", value, usage)
+}
+
+// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
+func StringArrayP(name, shorthand string, value []string, usage string) *[]string {
+ return CommandLine.StringArrayP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go
new file mode 100644
index 000000000..3cb2e69db
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_slice.go
@@ -0,0 +1,163 @@
+package pflag
+
+import (
+ "bytes"
+ "encoding/csv"
+ "strings"
+)
+
+// -- stringSlice Value
+type stringSliceValue struct {
+ value *[]string
+ changed bool
+}
+
+func newStringSliceValue(val []string, p *[]string) *stringSliceValue {
+ ssv := new(stringSliceValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+func readAsCSV(val string) ([]string, error) {
+ if val == "" {
+ return []string{}, nil
+ }
+ stringReader := strings.NewReader(val)
+ csvReader := csv.NewReader(stringReader)
+ return csvReader.Read()
+}
+
+func writeAsCSV(vals []string) (string, error) {
+ b := &bytes.Buffer{}
+ w := csv.NewWriter(b)
+ err := w.Write(vals)
+ if err != nil {
+ return "", err
+ }
+ w.Flush()
+ return strings.TrimSuffix(b.String(), "\n"), nil
+}
+
+func (s *stringSliceValue) Set(val string) error {
+ v, err := readAsCSV(val)
+ if err != nil {
+ return err
+ }
+ if !s.changed {
+ *s.value = v
+ } else {
+ *s.value = append(*s.value, v...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringSliceValue) Type() string {
+ return "stringSlice"
+}
+
+func (s *stringSliceValue) String() string {
+ str, _ := writeAsCSV(*s.value)
+ return "[" + str + "]"
+}
+
+func (s *stringSliceValue) Append(val string) error {
+ *s.value = append(*s.value, val)
+ return nil
+}
+
+func (s *stringSliceValue) Replace(val []string) error {
+ *s.value = val
+ return nil
+}
+
+func (s *stringSliceValue) GetSlice() []string {
+ return *s.value
+}
+
+func stringSliceConv(sval string) (interface{}, error) {
+ sval = sval[1 : len(sval)-1]
+ // An empty string would cause a slice with one (empty) string
+ if len(sval) == 0 {
+ return []string{}, nil
+ }
+ return readAsCSV(sval)
+}
+
+// GetStringSlice returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringSlice(name string) ([]string, error) {
+ val, err := f.getFlagType(name, "stringSlice", stringSliceConv)
+ if err != nil {
+ return []string{}, err
+ }
+ return val.([]string), nil
+}
+
+// StringSliceVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly.
+// For example:
+// --ss="v1,v2" --ss="v3"
+// will result in
+// []string{"v1", "v2", "v3"}
+func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) {
+ f.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ f.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSliceVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly.
+// For example:
+// --ss="v1,v2" --ss="v3"
+// will result in
+// []string{"v1", "v2", "v3"}
+func StringSliceVar(p *[]string, name string, value []string, usage string) {
+ CommandLine.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSlice defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly.
+// For example:
+// --ss="v1,v2" --ss="v3"
+// will result in
+// []string{"v1", "v2", "v3"}
+func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringSlice defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly.
+// For example:
+// --ss="v1,v2" --ss="v3"
+// will result in
+// []string{"v1", "v2", "v3"}
+func StringSlice(name string, value []string, usage string) *[]string {
+ return CommandLine.StringSliceP(name, "", value, usage)
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+ return CommandLine.StringSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string_to_int.go b/vendor/github.com/spf13/pflag/string_to_int.go
new file mode 100644
index 000000000..5ceda3965
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_int.go
@@ -0,0 +1,149 @@
+package pflag
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- stringToInt Value
+type stringToIntValue struct {
+ value *map[string]int
+ changed bool
+}
+
+func newStringToIntValue(val map[string]int, p *map[string]int) *stringToIntValue {
+ ssv := new(stringToIntValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToIntValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make(map[string]int, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.Atoi(kv[1])
+ if err != nil {
+ return err
+ }
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ for k, v := range out {
+ (*s.value)[k] = v
+ }
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringToIntValue) Type() string {
+ return "stringToInt"
+}
+
+func (s *stringToIntValue) String() string {
+ var buf bytes.Buffer
+ i := 0
+ for k, v := range *s.value {
+ if i > 0 {
+ buf.WriteRune(',')
+ }
+ buf.WriteString(k)
+ buf.WriteRune('=')
+ buf.WriteString(strconv.Itoa(v))
+ i++
+ }
+ return "[" + buf.String() + "]"
+}
+
+func stringToIntConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+ return map[string]int{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make(map[string]int, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.Atoi(kv[1])
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetStringToInt returns the map[string]int value of a flag with the given name
+func (f *FlagSet) GetStringToInt(name string) (map[string]int, error) {
+ val, err := f.getFlagType(name, "stringToInt", stringToIntConv)
+ if err != nil {
+ return map[string]int{}, err
+ }
+ return val.(map[string]int), nil
+}
+
+// StringToIntVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int variable in which to store the values of the multiple flags.
+// The value of each argument will not try to be separated by comma
+func (f *FlagSet) StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
+ f.VarP(newStringToIntValue(value, p), name, "", usage)
+}
+
+// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
+ f.VarP(newStringToIntValue(value, p), name, shorthand, usage)
+}
+
+// StringToIntVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int variable in which to store the value of the flag.
+// The value of each argument will not try to be separated by comma
+func StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
+ CommandLine.VarP(newStringToIntValue(value, p), name, "", usage)
+}
+
+// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
+func StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
+ CommandLine.VarP(newStringToIntValue(value, p), name, shorthand, usage)
+}
+
+// StringToInt defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma
+func (f *FlagSet) StringToInt(name string, value map[string]int, usage string) *map[string]int {
+ p := map[string]int{}
+ f.StringToIntVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
+ p := map[string]int{}
+ f.StringToIntVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringToInt defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma
+func StringToInt(name string, value map[string]int, usage string) *map[string]int {
+ return CommandLine.StringToIntP(name, "", value, usage)
+}
+
+// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
+func StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
+ return CommandLine.StringToIntP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string_to_int64.go b/vendor/github.com/spf13/pflag/string_to_int64.go
new file mode 100644
index 000000000..a807a04a0
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_int64.go
@@ -0,0 +1,149 @@
+package pflag
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- stringToInt64 Value
+type stringToInt64Value struct {
+ value *map[string]int64
+ changed bool
+}
+
+func newStringToInt64Value(val map[string]int64, p *map[string]int64) *stringToInt64Value {
+ ssv := new(stringToInt64Value)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToInt64Value) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make(map[string]int64, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64)
+ if err != nil {
+ return err
+ }
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ for k, v := range out {
+ (*s.value)[k] = v
+ }
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringToInt64Value) Type() string {
+ return "stringToInt64"
+}
+
+func (s *stringToInt64Value) String() string {
+ var buf bytes.Buffer
+ i := 0
+ for k, v := range *s.value {
+ if i > 0 {
+ buf.WriteRune(',')
+ }
+ buf.WriteString(k)
+ buf.WriteRune('=')
+ buf.WriteString(strconv.FormatInt(v, 10))
+ i++
+ }
+ return "[" + buf.String() + "]"
+}
+
+func stringToInt64Conv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+ return map[string]int64{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make(map[string]int64, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetStringToInt64 returns the map[string]int64 value of a flag with the given name
+func (f *FlagSet) GetStringToInt64(name string) (map[string]int64, error) {
+ val, err := f.getFlagType(name, "stringToInt64", stringToInt64Conv)
+ if err != nil {
+ return map[string]int64{}, err
+ }
+ return val.(map[string]int64), nil
+}
+
+// StringToInt64Var defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int64 variable in which to store the values of the multiple flags.
+// The value of each argument will not try to be separated by comma
+func (f *FlagSet) StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) {
+ f.VarP(newStringToInt64Value(value, p), name, "", usage)
+}
+
+// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) {
+ f.VarP(newStringToInt64Value(value, p), name, shorthand, usage)
+}
+
+// StringToInt64Var defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int64 variable in which to store the value of the flag.
+// The value of each argument will not try to be separated by comma
+func StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) {
+ CommandLine.VarP(newStringToInt64Value(value, p), name, "", usage)
+}
+
+// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash.
+func StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) {
+ CommandLine.VarP(newStringToInt64Value(value, p), name, shorthand, usage)
+}
+
+// StringToInt64 defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int64 variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma
+func (f *FlagSet) StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 {
+ p := map[string]int64{}
+ f.StringToInt64VarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 {
+ p := map[string]int64{}
+ f.StringToInt64VarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringToInt64 defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int64 variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma
+func StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 {
+ return CommandLine.StringToInt64P(name, "", value, usage)
+}
+
+// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash.
+func StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 {
+ return CommandLine.StringToInt64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string_to_string.go b/vendor/github.com/spf13/pflag/string_to_string.go
new file mode 100644
index 000000000..890a01afc
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_string.go
@@ -0,0 +1,160 @@
+package pflag
+
+import (
+ "bytes"
+ "encoding/csv"
+ "fmt"
+ "strings"
+)
+
+// -- stringToString Value
+type stringToStringValue struct {
+ value *map[string]string
+ changed bool
+}
+
+func newStringToStringValue(val map[string]string, p *map[string]string) *stringToStringValue {
+ ssv := new(stringToStringValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToStringValue) Set(val string) error {
+ var ss []string
+ n := strings.Count(val, "=")
+ switch n {
+ case 0:
+ return fmt.Errorf("%s must be formatted as key=value", val)
+ case 1:
+ ss = append(ss, strings.Trim(val, `"`))
+ default:
+ r := csv.NewReader(strings.NewReader(val))
+ var err error
+ ss, err = r.Read()
+ if err != nil {
+ return err
+ }
+ }
+
+ out := make(map[string]string, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ out[kv[0]] = kv[1]
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ for k, v := range out {
+ (*s.value)[k] = v
+ }
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringToStringValue) Type() string {
+ return "stringToString"
+}
+
+func (s *stringToStringValue) String() string {
+ records := make([]string, 0, len(*s.value)>>1)
+ for k, v := range *s.value {
+ records = append(records, k+"="+v)
+ }
+
+ var buf bytes.Buffer
+ w := csv.NewWriter(&buf)
+ if err := w.Write(records); err != nil {
+ panic(err)
+ }
+ w.Flush()
+ return "[" + strings.TrimSpace(buf.String()) + "]"
+}
+
+func stringToStringConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+ return map[string]string{}, nil
+ }
+ r := csv.NewReader(strings.NewReader(val))
+ ss, err := r.Read()
+ if err != nil {
+ return nil, err
+ }
+ out := make(map[string]string, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ out[kv[0]] = kv[1]
+ }
+ return out, nil
+}
+
+// GetStringToString returns the map[string]string value of a flag with the given name
+func (f *FlagSet) GetStringToString(name string) (map[string]string, error) {
+ val, err := f.getFlagType(name, "stringToString", stringToStringConv)
+ if err != nil {
+ return map[string]string{}, err
+ }
+ return val.(map[string]string), nil
+}
+
+// StringToStringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]string variable in which to store the values of the multiple flags.
+// The value of each argument will not try to be separated by comma
+func (f *FlagSet) StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
+ f.VarP(newStringToStringValue(value, p), name, "", usage)
+}
+
+// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
+ f.VarP(newStringToStringValue(value, p), name, shorthand, usage)
+}
+
+// StringToStringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]string variable in which to store the value of the flag.
+// The value of each argument will not try to be separated by comma
+func StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
+ CommandLine.VarP(newStringToStringValue(value, p), name, "", usage)
+}
+
+// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
+func StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
+ CommandLine.VarP(newStringToStringValue(value, p), name, shorthand, usage)
+}
+
+// StringToString defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]string variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma
+func (f *FlagSet) StringToString(name string, value map[string]string, usage string) *map[string]string {
+ p := map[string]string{}
+ f.StringToStringVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
+ p := map[string]string{}
+ f.StringToStringVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringToString defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]string variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma
+func StringToString(name string, value map[string]string, usage string) *map[string]string {
+ return CommandLine.StringToStringP(name, "", value, usage)
+}
+
+// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
+func StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
+ return CommandLine.StringToStringP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint.go b/vendor/github.com/spf13/pflag/uint.go
new file mode 100644
index 000000000..dcbc2b758
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint Value
+type uintValue uint
+
+func newUintValue(val uint, p *uint) *uintValue {
+ *p = val
+ return (*uintValue)(p)
+}
+
+func (i *uintValue) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ *i = uintValue(v)
+ return err
+}
+
+func (i *uintValue) Type() string {
+ return "uint"
+}
+
+func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uintConv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 0)
+ if err != nil {
+ return 0, err
+ }
+ return uint(v), nil
+}
+
+// GetUint returns the uint value of a flag with the given name
+func (f *FlagSet) GetUint(name string) (uint, error) {
+ val, err := f.getFlagType(name, "uint", uintConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint), nil
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) {
+ f.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+ f.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func UintVar(p *uint, name string, value uint, usage string) {
+ CommandLine.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+ CommandLine.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func (f *FlagSet) Uint(name string, value uint, usage string) *uint {
+ p := new(uint)
+ f.UintVarP(p, name, "", value, usage)
+ return p
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint {
+ p := new(uint)
+ f.UintVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func Uint(name string, value uint, usage string) *uint {
+ return CommandLine.UintP(name, "", value, usage)
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func UintP(name, shorthand string, value uint, usage string) *uint {
+ return CommandLine.UintP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint16.go b/vendor/github.com/spf13/pflag/uint16.go
new file mode 100644
index 000000000..7e9914edd
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint16.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint16 value
+type uint16Value uint16
+
+func newUint16Value(val uint16, p *uint16) *uint16Value {
+ *p = val
+ return (*uint16Value)(p)
+}
+
+func (i *uint16Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 16)
+ *i = uint16Value(v)
+ return err
+}
+
+func (i *uint16Value) Type() string {
+ return "uint16"
+}
+
+func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint16Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 16)
+ if err != nil {
+ return 0, err
+ }
+ return uint16(v), nil
+}
+
+// GetUint16 returns the uint16 value of a flag with the given name
+func (f *FlagSet) GetUint16(name string) (uint16, error) {
+ val, err := f.getFlagType(name, "uint16", uint16Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint16), nil
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) {
+ f.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+ f.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func Uint16Var(p *uint16, name string, value uint16, usage string) {
+ CommandLine.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+ CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 {
+ p := new(uint16)
+ f.Uint16VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+ p := new(uint16)
+ f.Uint16VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func Uint16(name string, value uint16, usage string) *uint16 {
+ return CommandLine.Uint16P(name, "", value, usage)
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+ return CommandLine.Uint16P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint32.go b/vendor/github.com/spf13/pflag/uint32.go
new file mode 100644
index 000000000..d8024539b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint32 value
+type uint32Value uint32
+
+func newUint32Value(val uint32, p *uint32) *uint32Value {
+ *p = val
+ return (*uint32Value)(p)
+}
+
+func (i *uint32Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 32)
+ *i = uint32Value(v)
+ return err
+}
+
+func (i *uint32Value) Type() string {
+ return "uint32"
+}
+
+func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return uint32(v), nil
+}
+
+// GetUint32 returns the uint32 value of a flag with the given name
+func (f *FlagSet) GetUint32(name string) (uint32, error) {
+ val, err := f.getFlagType(name, "uint32", uint32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint32), nil
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) {
+ f.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+ f.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func Uint32Var(p *uint32, name string, value uint32, usage string) {
+ CommandLine.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+ CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 {
+ p := new(uint32)
+ f.Uint32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+ p := new(uint32)
+ f.Uint32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func Uint32(name string, value uint32, usage string) *uint32 {
+ return CommandLine.Uint32P(name, "", value, usage)
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+ return CommandLine.Uint32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint64.go b/vendor/github.com/spf13/pflag/uint64.go
new file mode 100644
index 000000000..f62240f2c
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint64.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint64 Value
+type uint64Value uint64
+
+func newUint64Value(val uint64, p *uint64) *uint64Value {
+ *p = val
+ return (*uint64Value)(p)
+}
+
+func (i *uint64Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ *i = uint64Value(v)
+ return err
+}
+
+func (i *uint64Value) Type() string {
+ return "uint64"
+}
+
+func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint64Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 64)
+ if err != nil {
+ return 0, err
+ }
+ return uint64(v), nil
+}
+
+// GetUint64 returns the uint64 value of a flag with the given name
+func (f *FlagSet) GetUint64(name string) (uint64, error) {
+ val, err := f.getFlagType(name, "uint64", uint64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint64), nil
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) {
+ f.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+ f.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func Uint64Var(p *uint64, name string, value uint64, usage string) {
+ CommandLine.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+ CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 {
+ p := new(uint64)
+ f.Uint64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+ p := new(uint64)
+ f.Uint64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func Uint64(name string, value uint64, usage string) *uint64 {
+ return CommandLine.Uint64P(name, "", value, usage)
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+ return CommandLine.Uint64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint8.go b/vendor/github.com/spf13/pflag/uint8.go
new file mode 100644
index 000000000..bb0e83c1f
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint8.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint8 Value
+type uint8Value uint8
+
+func newUint8Value(val uint8, p *uint8) *uint8Value {
+ *p = val
+ return (*uint8Value)(p)
+}
+
+func (i *uint8Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 8)
+ *i = uint8Value(v)
+ return err
+}
+
+func (i *uint8Value) Type() string {
+ return "uint8"
+}
+
+func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint8Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 8)
+ if err != nil {
+ return 0, err
+ }
+ return uint8(v), nil
+}
+
+// GetUint8 returns the uint8 value of a flag with the given name
+func (f *FlagSet) GetUint8(name string) (uint8, error) {
+ val, err := f.getFlagType(name, "uint8", uint8Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint8), nil
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) {
+ f.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+ f.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func Uint8Var(p *uint8, name string, value uint8, usage string) {
+ CommandLine.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+ CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 {
+ p := new(uint8)
+ f.Uint8VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+ p := new(uint8)
+ f.Uint8VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func Uint8(name string, value uint8, usage string) *uint8 {
+ return CommandLine.Uint8P(name, "", value, usage)
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+ return CommandLine.Uint8P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go
new file mode 100644
index 000000000..5fa924835
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint_slice.go
@@ -0,0 +1,168 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- uintSlice Value
+type uintSliceValue struct {
+ value *[]uint
+ changed bool
+}
+
+func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue {
+ uisv := new(uintSliceValue)
+ uisv.value = p
+ *uisv.value = val
+ return uisv
+}
+
+func (s *uintSliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]uint, len(ss))
+ for i, d := range ss {
+ u, err := strconv.ParseUint(d, 10, 0)
+ if err != nil {
+ return err
+ }
+ out[i] = uint(u)
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *uintSliceValue) Type() string {
+ return "uintSlice"
+}
+
+func (s *uintSliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *uintSliceValue) fromString(val string) (uint, error) {
+ t, err := strconv.ParseUint(val, 10, 0)
+ if err != nil {
+ return 0, err
+ }
+ return uint(t), nil
+}
+
+func (s *uintSliceValue) toString(val uint) string {
+ return fmt.Sprintf("%d", val)
+}
+
+func (s *uintSliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *uintSliceValue) Replace(val []string) error {
+ out := make([]uint, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *uintSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func uintSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []uint{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]uint, len(ss))
+ for i, d := range ss {
+ u, err := strconv.ParseUint(d, 10, 0)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = uint(u)
+ }
+ return out, nil
+}
+
+// GetUintSlice returns the []uint value of a flag with the given name.
+func (f *FlagSet) GetUintSlice(name string) ([]uint, error) {
+ val, err := f.getFlagType(name, "uintSlice", uintSliceConv)
+ if err != nil {
+ return []uint{}, err
+ }
+ return val.([]uint), nil
+}
+
+// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+ f.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+ f.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSliceVar defines a []uint flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+ CommandLine.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+ CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint {
+ p := []uint{}
+ f.UintSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+ p := []uint{}
+ f.UintSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func UintSlice(name string, value []uint, usage string) *[]uint {
+ return CommandLine.UintSliceP(name, "", value, usage)
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+ return CommandLine.UintSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/viper/.editorconfig b/vendor/github.com/spf13/viper/.editorconfig
new file mode 100644
index 000000000..63afcbcdd
--- /dev/null
+++ b/vendor/github.com/spf13/viper/.editorconfig
@@ -0,0 +1,15 @@
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+indent_size = 4
+indent_style = space
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+[*.go]
+indent_style = tab
+
+[{Makefile, *.mk}]
+indent_style = tab
diff --git a/vendor/github.com/spf13/viper/.gitignore b/vendor/github.com/spf13/viper/.gitignore
new file mode 100644
index 000000000..896250839
--- /dev/null
+++ b/vendor/github.com/spf13/viper/.gitignore
@@ -0,0 +1,5 @@
+/.idea/
+/bin/
+/build/
+/var/
+/vendor/
diff --git a/vendor/github.com/spf13/viper/.golangci.yml b/vendor/github.com/spf13/viper/.golangci.yml
new file mode 100644
index 000000000..4f970acb1
--- /dev/null
+++ b/vendor/github.com/spf13/viper/.golangci.yml
@@ -0,0 +1,93 @@
+run:
+ timeout: 5m
+
+linters-settings:
+ gci:
+ local-prefixes: github.com/spf13/viper
+ golint:
+ min-confidence: 0
+ goimports:
+ local-prefixes: github.com/spf13/viper
+
+linters:
+ disable-all: true
+ enable:
+ - bodyclose
+ - deadcode
+ - dogsled
+ - dupl
+ - durationcheck
+ - exhaustive
+ - exportloopref
+ - gci
+ - goconst
+ - gofmt
+ - gofumpt
+ - goimports
+ - gomoddirectives
+ - goprintffuncname
+ - govet
+ - importas
+ - ineffassign
+ - makezero
+ - misspell
+ - nakedret
+ - nilerr
+ - noctx
+ - nolintlint
+ - prealloc
+ - predeclared
+ - revive
+ - rowserrcheck
+ - sqlclosecheck
+ - staticcheck
+ - structcheck
+ - stylecheck
+ - tparallel
+ - typecheck
+ - unconvert
+ - unparam
+ - unused
+ - varcheck
+ - wastedassign
+ - whitespace
+
+ # fixme
+ # - cyclop
+ # - errcheck
+ # - errorlint
+ # - exhaustivestruct
+ # - forbidigo
+ # - forcetypeassert
+ # - gochecknoglobals
+ # - gochecknoinits
+ # - gocognit
+ # - gocritic
+ # - gocyclo
+ # - godot
+ # - gosec
+ # - gosimple
+ # - ifshort
+ # - lll
+ # - nlreturn
+ # - paralleltest
+ # - scopelint
+ # - thelper
+ # - wrapcheck
+
+ # unused
+ # - depguard
+ # - goheader
+ # - gomodguard
+
+ # don't enable:
+ # - asciicheck
+ # - funlen
+ # - godox
+ # - goerr113
+ # - gomnd
+ # - interfacer
+ # - maligned
+ # - nestif
+ # - testpackage
+ # - wsl
diff --git a/vendor/github.com/spf13/viper/LICENSE b/vendor/github.com/spf13/viper/LICENSE
new file mode 100644
index 000000000..4527efb9c
--- /dev/null
+++ b/vendor/github.com/spf13/viper/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Steve Francia
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/spf13/viper/Makefile b/vendor/github.com/spf13/viper/Makefile
new file mode 100644
index 000000000..b0f9acf24
--- /dev/null
+++ b/vendor/github.com/spf13/viper/Makefile
@@ -0,0 +1,76 @@
+# A Self-Documenting Makefile: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html
+
+OS = $(shell uname | tr A-Z a-z)
+export PATH := $(abspath bin/):${PATH}
+
+# Build variables
+BUILD_DIR ?= build
+export CGO_ENABLED ?= 0
+export GOOS = $(shell go env GOOS)
+ifeq (${VERBOSE}, 1)
+ifeq ($(filter -v,${GOARGS}),)
+ GOARGS += -v
+endif
+TEST_FORMAT = short-verbose
+endif
+
+# Dependency versions
+GOTESTSUM_VERSION = 1.6.4
+GOLANGCI_VERSION = 1.40.1
+
+# Add the ability to override some variables
+# Use with care
+-include override.mk
+
+.PHONY: clear
+clear: ## Clear the working area and the project
+ rm -rf bin/
+
+.PHONY: check
+check: test lint ## Run tests and linters
+
+bin/gotestsum: bin/gotestsum-${GOTESTSUM_VERSION}
+ @ln -sf gotestsum-${GOTESTSUM_VERSION} bin/gotestsum
+bin/gotestsum-${GOTESTSUM_VERSION}:
+ @mkdir -p bin
+ curl -L https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_${OS}_amd64.tar.gz | tar -zOxf - gotestsum > ./bin/gotestsum-${GOTESTSUM_VERSION} && chmod +x ./bin/gotestsum-${GOTESTSUM_VERSION}
+
+TEST_PKGS ?= ./...
+.PHONY: test
+test: TEST_FORMAT ?= short
+test: SHELL = /bin/bash
+test: export CGO_ENABLED=1
+test: bin/gotestsum ## Run tests
+ @mkdir -p ${BUILD_DIR}
+ bin/gotestsum --no-summary=skipped --junitfile ${BUILD_DIR}/coverage.xml --format ${TEST_FORMAT} -- -race -coverprofile=${BUILD_DIR}/coverage.txt -covermode=atomic $(filter-out -v,${GOARGS}) $(if ${TEST_PKGS},${TEST_PKGS},./...)
+
+bin/golangci-lint: bin/golangci-lint-${GOLANGCI_VERSION}
+ @ln -sf golangci-lint-${GOLANGCI_VERSION} bin/golangci-lint
+bin/golangci-lint-${GOLANGCI_VERSION}:
+ @mkdir -p bin
+ curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b ./bin/ v${GOLANGCI_VERSION}
+ @mv bin/golangci-lint "$@"
+
+.PHONY: lint
+lint: bin/golangci-lint ## Run linter
+ bin/golangci-lint run
+
+.PHONY: fix
+fix: bin/golangci-lint ## Fix lint violations
+ bin/golangci-lint run --fix
+
+# Add custom targets here
+-include custom.mk
+
+.PHONY: list
+list: ## List all make targets
+ @${MAKE} -pRrn : -f $(MAKEFILE_LIST) 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | sort
+
+.PHONY: help
+.DEFAULT_GOAL := help
+help:
+ @grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
+
+# Variable outputting/exporting rules
+var-%: ; @echo $($*)
+varexport-%: ; @echo $*=$($*)
diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md
new file mode 100644
index 000000000..9712e7051
--- /dev/null
+++ b/vendor/github.com/spf13/viper/README.md
@@ -0,0 +1,874 @@
+> ## Viper v2 feedback
+> Viper is heading towards v2 and we would love to hear what _**you**_ would like to see in it. Share your thoughts here: https://forms.gle/R6faU74qPRPAzchZ9
+>
+> **Thank you!**
+
+![Viper](.github/logo.png?raw=true)
+
+[![Mentioned in Awesome Go](https://awesome.re/mentioned-badge-flat.svg)](https://github.com/avelino/awesome-go#configuration)
+[![run on repl.it](https://repl.it/badge/github/sagikazarmark/Viper-example)](https://repl.it/@sagikazarmark/Viper-example#main.go)
+
+[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/spf13/viper/CI?style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI)
+[![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/viper?style=flat-square)](https://goreportcard.com/report/github.com/spf13/viper)
+![Go Version](https://img.shields.io/badge/go%20version-%3E=1.14-61CFDD.svg?style=flat-square)
+[![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/viper)](https://pkg.go.dev/mod/github.com/spf13/viper)
+
+**Go configuration with fangs!**
+
+Many Go projects are built using Viper including:
+
+* [Hugo](http://gohugo.io)
+* [EMC RexRay](http://rexray.readthedocs.org/en/stable/)
+* [Imgur’s Incus](https://github.com/Imgur/incus)
+* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
+* [Docker Notary](https://github.com/docker/Notary)
+* [BloomApi](https://www.bloomapi.com/)
+* [doctl](https://github.com/digitalocean/doctl)
+* [Clairctl](https://github.com/jgsqware/clairctl)
+* [Mercure](https://mercure.rocks)
+
+
+## Install
+
+```shell
+go get github.com/spf13/viper
+```
+
+**Note:** Viper uses [Go Modules](https://github.com/golang/go/wiki/Modules) to manage dependencies.
+
+
+## What is Viper?
+
+Viper is a complete configuration solution for Go applications including 12-Factor apps. It is designed
+to work within an application, and can handle all types of configuration needs
+and formats. It supports:
+
+* setting defaults
+* reading from JSON, TOML, YAML, HCL, envfile and Java properties config files
+* live watching and re-reading of config files (optional)
+* reading from environment variables
+* reading from remote config systems (etcd or Consul), and watching changes
+* reading from command line flags
+* reading from buffer
+* setting explicit values
+
+Viper can be thought of as a registry for all of your application's configuration needs.
+
+
+## Why Viper?
+
+When building a modern application, you don’t want to worry about
+configuration file formats; you want to focus on building awesome software.
+Viper is here to help with that.
+
+Viper does the following for you:
+
+1. Find, load, and unmarshal a configuration file in JSON, TOML, YAML, HCL, INI, envfile or Java properties formats.
+2. Provide a mechanism to set default values for your different configuration options.
+3. Provide a mechanism to set override values for options specified through command line flags.
+4. Provide an alias system to easily rename parameters without breaking existing code.
+5. Make it easy to tell the difference between when a user has explicitly provided a value (via a command line flag or config file) and when that value merely matches the default.
+
+Viper uses the following precedence order. Each item takes precedence over the item below it:
+
+ * explicit call to `Set`
+ * flag
+ * env
+ * config
+ * key/value store
+ * default
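+
+As a rough sketch of that order (using an illustrative `port` key and `myapp` prefix), an explicit `Set` wins over a bound environment variable, which in turn wins over a default:
+
+```go
+viper.SetDefault("port", 8080)  // lowest precedence
+
+viper.SetEnvPrefix("myapp")
+viper.BindEnv("port")           // binds the key to MYAPP_PORT
+os.Setenv("MYAPP_PORT", "9090") // env beats the default
+viper.GetInt("port")            // 9090
+
+viper.Set("port", 3000)         // an explicit Set beats everything below it
+viper.GetInt("port")            // 3000
+```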
+
+**Important:** Viper configuration keys are case insensitive.
+There are ongoing discussions about making that optional.
+
+
+## Putting Values into Viper
+
+### Establishing Defaults
+
+A good configuration system will support default values. A default value is not
+required for a key, but it’s useful in the event that a key hasn't been set via
+config file, environment variable, remote configuration or flag.
+
+Examples:
+
+```go
+viper.SetDefault("ContentDir", "content")
+viper.SetDefault("LayoutDir", "layouts")
+viper.SetDefault("Taxonomies", map[string]string{"tag": "tags", "category": "categories"})
+```
+
+### Reading Config Files
+
+Viper requires minimal configuration so it knows where to look for config files.
+Viper supports JSON, TOML, YAML, HCL, INI, envfile and Java Properties files. Viper can search multiple paths, but
+currently a single Viper instance only supports a single configuration file.
+Viper does not default to any configuration search paths, leaving that decision to the application.
+
+Here is an example of how to use Viper to search for and read a configuration file.
+None of the specific paths are required, but at least one path should be provided
+where a configuration file is expected.
+
+```go
+viper.SetConfigName("config") // name of config file (without extension)
+viper.SetConfigType("yaml") // REQUIRED if the config file does not have the extension in the name
+viper.AddConfigPath("/etc/appname/") // path to look for the config file in
+viper.AddConfigPath("$HOME/.appname") // call multiple times to add many search paths
+viper.AddConfigPath(".") // optionally look for config in the working directory
+err := viper.ReadInConfig() // Find and read the config file
+if err != nil { // Handle errors reading the config file
+ panic(fmt.Errorf("Fatal error config file: %w \n", err))
+}
+```
+
+You can handle the specific case where no config file is found like this:
+
+```go
+if err := viper.ReadInConfig(); err != nil {
+ if _, ok := err.(viper.ConfigFileNotFoundError); ok {
+ // Config file not found; ignore error if desired
+ } else {
+ // Config file was found but another error was produced
+ }
+}
+
+// Config file found and successfully parsed
+```
+
+*NOTE [since 1.6]:* You can also have a file without an extension and specify the format programmatically. This is useful for configuration files that live in the user's home directory without any extension, like `.bashrc`.
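+
+For instance (a small sketch using a hypothetical `.myapprc` file), such a dotfile could be loaded like this:
+
+```go
+viper.SetConfigName(".myapprc") // full file name, no extension
+viper.SetConfigType("yaml")     // tell Viper how to parse it
+viper.AddConfigPath("$HOME")
+err := viper.ReadInConfig()
+```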
+
+### Writing Config Files
+
+Reading from config files is useful, but at times you want to store all modifications made at run time.
+For that, a handful of methods are available, each with its own purpose:
+
+* WriteConfig - writes the current viper configuration to the predefined path, if one exists. Errors if no predefined path is set. Will overwrite the current config file, if it exists.
+* SafeWriteConfig - writes the current viper configuration to the predefined path. Errors if no predefined path is set. Will not overwrite the current config file, if it exists.
+* WriteConfigAs - writes the current viper configuration to the given filepath. Will overwrite the given file, if it exists.
+* SafeWriteConfigAs - writes the current viper configuration to the given filepath. Will not overwrite the given file, if it exists.
+
+As a rule of thumb, everything marked with safe won't overwrite any file; it only creates the file if it doesn't already exist, whereas the default behavior is to create or truncate.
+
+A few small examples:
+
+```go
+viper.WriteConfig() // writes current config to predefined path set by 'viper.AddConfigPath()' and 'viper.SetConfigName'
+viper.SafeWriteConfig()
+viper.WriteConfigAs("/path/to/my/.config")
+viper.SafeWriteConfigAs("/path/to/my/.config") // will error since it has already been written
+viper.SafeWriteConfigAs("/path/to/my/.other_config")
+```
+
+### Watching and re-reading config files
+
+Viper supports the ability to have your application live read a config file while running.
+
+Gone are the days of needing to restart a server to have a config take effect;
+viper-powered applications can read an update to a config file while running and
+not miss a beat.
+
+Simply tell the viper instance to `WatchConfig()`.
+Optionally you can provide a function for Viper to run each time a change occurs.
+
+**Make sure you add all of the configPaths prior to calling `WatchConfig()`**
+
+```go
+viper.OnConfigChange(func(e fsnotify.Event) {
+ fmt.Println("Config file changed:", e.Name)
+})
+viper.WatchConfig()
+```
+
+### Reading Config from io.Reader
+
+Viper predefines many configuration sources such as files, environment
+variables, flags, and remote K/V stores, but you are not bound to them. You can
+also implement your own configuration source and feed it to viper.
+
+```go
+viper.SetConfigType("yaml") // or viper.SetConfigType("YAML")
+
+// any approach to load this configuration into your program.
+var yamlExample = []byte(`
+Hacker: true
+name: steve
+hobbies:
+- skateboarding
+- snowboarding
+- go
+clothing:
+ jacket: leather
+ trousers: denim
+age: 35
+eyes : brown
+beard: true
+`)
+
+viper.ReadConfig(bytes.NewBuffer(yamlExample))
+
+viper.Get("name") // this would be "steve"
+```
+
+### Setting Overrides
+
+These could be from a command line flag, or from your own application logic.
+
+```go
+viper.Set("Verbose", true)
+viper.Set("LogFile", LogFile)
+```
+
+### Registering and Using Aliases
+
+Aliases permit a single value to be referenced by multiple keys.
+
+```go
+viper.RegisterAlias("loud", "Verbose")
+
+viper.Set("verbose", true) // same result as next line
+viper.Set("loud", true) // same result as prior line
+
+viper.GetBool("loud") // true
+viper.GetBool("verbose") // true
+```
+
+### Working with Environment Variables
+
+Viper has full support for environment variables. This enables 12 factor
+applications out of the box. There are five methods that exist to aid working
+with ENV:
+
+ * `AutomaticEnv()`
+ * `BindEnv(string...) : error`
+ * `SetEnvPrefix(string)`
+ * `SetEnvKeyReplacer(string...) *strings.Replacer`
+ * `AllowEmptyEnv(bool)`
+
+_When working with ENV variables, it’s important to recognize that Viper
+treats ENV variables as case sensitive._
+
+Viper provides a mechanism to try to ensure that ENV variables are unique. By
+using `SetEnvPrefix`, you can tell Viper to use a prefix while reading from
+the environment variables. Both `BindEnv` and `AutomaticEnv` will use this
+prefix.
+
+`BindEnv` takes one or more parameters. The first parameter is the key name, the
+rest are the name of the environment variables to bind to this key. If more than
+one are provided, they will take precedence in the specified order. The name of
+the environment variable is case sensitive. If the ENV variable name is not provided, then
+Viper will automatically assume that the ENV variable matches the following format: prefix + "_" + the key name in ALL CAPS. When you explicitly provide the ENV variable name (the second parameter),
+it **does not** automatically add the prefix. For example if the second parameter is "id",
+Viper will look for the ENV variable "ID".
+
+One important thing to recognize when working with ENV variables is that the
+value will be read each time it is accessed. Viper does not fix the value when
+`BindEnv` is called.
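+
+A brief sketch of that behaviour (using an illustrative `loglevel` key):
+
+```go
+viper.BindEnv("loglevel")   // no value is captured here
+
+os.Setenv("LOGLEVEL", "debug")
+viper.GetString("loglevel") // "debug"
+
+os.Setenv("LOGLEVEL", "trace")
+viper.GetString("loglevel") // "trace": the variable is re-read on each access
+```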
+
+`AutomaticEnv` is a powerful helper, especially when combined with
+`SetEnvPrefix`. When called, Viper will check for an environment variable any
+time a `viper.Get` request is made, looking for a variable with a name matching
+the key uppercased and prefixed with the `EnvPrefix` (if set).
+
+`SetEnvKeyReplacer` allows you to use a `strings.Replacer` object to rewrite Env
+keys to an extent. This is useful if you want to use `-` or something in your
+`Get()` calls, but want your environment variables to use `_` delimiters. An
+example of using it can be found in `viper_test.go`.
+
+Alternatively, you can use `EnvKeyReplacer` with `NewWithOptions` factory function.
+Unlike `SetEnvKeyReplacer`, it accepts a `StringReplacer` interface allowing you to write custom string replacing logic.
+
+By default empty environment variables are considered unset and will fall back to
+the next configuration source. To treat empty environment variables as set, use
+the `AllowEmptyEnv` method.
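+
+A short sketch combining these options (the `log-level` key is illustrative):
+
+```go
+v := viper.NewWithOptions(viper.EnvKeyReplacer(strings.NewReplacer("-", "_")))
+v.AllowEmptyEnv(true)  // treat set-but-empty variables as valid
+v.SetEnvPrefix("spf")
+v.BindEnv("log-level") // reads SPF_LOG_LEVEL thanks to the replacer
+```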
+
+#### Env example
+
+```go
+SetEnvPrefix("spf") // will be uppercased automatically
+BindEnv("id")
+
+os.Setenv("SPF_ID", "13") // typically done outside of the app
+
+id := Get("id") // 13
+```
+
+### Working with Flags
+
+Viper has the ability to bind to flags. Specifically, Viper supports `Pflags`
+as used in the [Cobra](https://github.com/spf13/cobra) library.
+
+Like `BindEnv`, the value is not set when the binding method is called, but when
+it is accessed. This means you can bind as early as you want, even in an
+`init()` function.
+
+For individual flags, the `BindPFlag()` method provides this functionality.
+
+Example:
+
+```go
+serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
+viper.BindPFlag("port", serverCmd.Flags().Lookup("port"))
+```
+
+You can also bind an existing set of pflags (pflag.FlagSet):
+
+Example:
+
+```go
+pflag.Int("flagname", 1234, "help message for flagname")
+
+pflag.Parse()
+viper.BindPFlags(pflag.CommandLine)
+
+i := viper.GetInt("flagname") // retrieve values from viper instead of pflag
+```
+
+The use of [pflag](https://github.com/spf13/pflag/) in Viper does not preclude
+the use of other packages that use the [flag](https://golang.org/pkg/flag/)
+package from the standard library. The pflag package can handle the flags
+defined for the flag package by importing these flags. This is accomplished
+by calling a convenience function provided by the pflag package called
+AddGoFlagSet().
+
+Example:
+
+```go
+package main
+
+import (
+ "flag"
+ "github.com/spf13/pflag"
+)
+
+func main() {
+
+ // using standard library "flag" package
+ flag.Int("flagname", 1234, "help message for flagname")
+
+ pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
+ pflag.Parse()
+ viper.BindPFlags(pflag.CommandLine)
+
+ i := viper.GetInt("flagname") // retrieve value from viper
+
+ // ...
+}
+```
+
+#### Flag interfaces
+
+Viper provides two Go interfaces to bind other flag systems if you don’t use `Pflags`.
+
+`FlagValue` represents a single flag. This is a very simple example of how to implement this interface:
+
+```go
+type myFlag struct {}
+func (f myFlag) HasChanged() bool { return false }
+func (f myFlag) Name() string { return "my-flag-name" }
+func (f myFlag) ValueString() string { return "my-flag-value" }
+func (f myFlag) ValueType() string { return "string" }
+```
+
+Once your flag implements this interface, you can simply tell Viper to bind it:
+
+```go
+viper.BindFlagValue("my-flag-name", myFlag{})
+```
+
+`FlagValueSet` represents a group of flags. This is a very simple example of how to implement this interface:
+
+```go
+type myFlagSet struct {
+ flags []myFlag
+}
+
+func (f myFlagSet) VisitAll(fn func(FlagValue)) {
+	for _, flag := range f.flags {
+ fn(flag)
+ }
+}
+```
+
+Once your flag set implements this interface, you can simply tell Viper to bind it:
+
+```go
+fSet := myFlagSet{
+ flags: []myFlag{myFlag{}, myFlag{}},
+}
+viper.BindFlagValues("my-flags", fSet)
+```
+
+### Remote Key/Value Store Support
+
+To enable remote support in Viper, do a blank import of the `viper/remote`
+package:
+
+`import _ "github.com/spf13/viper/remote"`
+
+Viper will read a config string (as JSON, TOML, YAML, HCL or envfile) retrieved from a path
+in a Key/Value store such as etcd or Consul. These values take precedence over
+default values, but are overridden by configuration values retrieved from disk,
+flags, or environment variables.
+
+Viper uses [crypt](https://github.com/bketelsen/crypt) to retrieve
+configuration from the K/V store, which means that you can store your
+configuration values encrypted and have them automatically decrypted if you have
+the correct gpg keyring. Encryption is optional.
+
+You can use remote configuration in conjunction with local configuration, or
+independently of it.
+
+`crypt` has a command-line helper that you can use to put configurations in your
+K/V store. `crypt` defaults to etcd on http://127.0.0.1:4001.
+
+```bash
+$ go get github.com/bketelsen/crypt/bin/crypt
+$ crypt set -plaintext /config/hugo.json /Users/hugo/settings/config.json
+```
+
+Confirm that your value was set:
+
+```bash
+$ crypt get -plaintext /config/hugo.json
+```
+
+See the `crypt` documentation for examples of how to set encrypted values, or
+how to use Consul.
+
+### Remote Key/Value Store Example - Unencrypted
+
+#### etcd
+```go
+viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001","/config/hugo.json")
+viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv"
+err := viper.ReadRemoteConfig()
+```
+
+#### Consul
+You need to set a key in Consul's key/value store with a JSON value containing your desired config.
+For example, create a Consul key/value store key `MY_CONSUL_KEY` with the value:
+
+```json
+{
+ "port": 8080,
+ "hostname": "myhostname.com"
+}
+```
+
+```go
+viper.AddRemoteProvider("consul", "localhost:8500", "MY_CONSUL_KEY")
+viper.SetConfigType("json") // Need to explicitly set this to json
+err := viper.ReadRemoteConfig()
+
+fmt.Println(viper.Get("port")) // 8080
+fmt.Println(viper.Get("hostname")) // myhostname.com
+```
+
+#### Firestore
+
+```go
+viper.AddRemoteProvider("firestore", "google-cloud-project-id", "collection/document")
+viper.SetConfigType("json") // Config's format: "json", "toml", "yaml", "yml"
+err := viper.ReadRemoteConfig()
+```
+
+Of course, you can also use `AddSecureRemoteProvider` here.
+
+### Remote Key/Value Store Example - Encrypted
+
+```go
+viper.AddSecureRemoteProvider("etcd","http://127.0.0.1:4001","/config/hugo.json","/etc/secrets/mykeyring.gpg")
+viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv"
+err := viper.ReadRemoteConfig()
+```
+
+### Watching Changes in etcd - Unencrypted
+
+```go
+// alternatively, you can create a new viper instance.
+var runtime_viper = viper.New()
+
+runtime_viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001", "/config/hugo.yml")
+runtime_viper.SetConfigType("yaml") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv"
+
+// read from remote config the first time.
+err := runtime_viper.ReadRemoteConfig()
+
+// unmarshal config
+runtime_viper.Unmarshal(&runtime_conf)
+
+// open a goroutine to watch remote changes forever
+go func(){
+ for {
+ time.Sleep(time.Second * 5) // delay after each request
+
+ // currently, only tested with etcd support
+ err := runtime_viper.WatchRemoteConfig()
+ if err != nil {
+ log.Errorf("unable to read remote config: %v", err)
+ continue
+ }
+
+ // unmarshal new config into our runtime config struct. you can also use channel
+ // to implement a signal to notify the system of the changes
+ runtime_viper.Unmarshal(&runtime_conf)
+ }
+}()
+```
+
+## Getting Values From Viper
+
+In Viper, there are a few ways to get a value depending on the value’s type.
+The following functions and methods exist:
+
+ * `Get(key string) : interface{}`
+ * `GetBool(key string) : bool`
+ * `GetFloat64(key string) : float64`
+ * `GetInt(key string) : int`
+ * `GetIntSlice(key string) : []int`
+ * `GetString(key string) : string`
+ * `GetStringMap(key string) : map[string]interface{}`
+ * `GetStringMapString(key string) : map[string]string`
+ * `GetStringSlice(key string) : []string`
+ * `GetTime(key string) : time.Time`
+ * `GetDuration(key string) : time.Duration`
+ * `IsSet(key string) : bool`
+ * `AllSettings() : map[string]interface{}`
+
+One important thing to recognize is that each Get function will return a zero
+value if it’s not found. To check if a given key exists, the `IsSet()` method
+has been provided.
+
+Example:
+```go
+viper.GetString("logfile") // case-insensitive Setting & Getting
+if viper.GetBool("verbose") {
+ fmt.Println("verbose enabled")
+}
+```
+### Accessing nested keys
+
+The accessor methods also accept formatted paths to deeply nested keys. For
+example, if the following JSON file is loaded:
+
+```json
+{
+ "host": {
+ "address": "localhost",
+ "port": 5799
+ },
+ "datastore": {
+ "metric": {
+ "host": "127.0.0.1",
+ "port": 3099
+ },
+ "warehouse": {
+ "host": "198.0.0.1",
+ "port": 2112
+ }
+ }
+}
+
+```
+
+Viper can access a nested field by passing a `.` delimited path of keys:
+
+```go
+GetString("datastore.metric.host") // (returns "127.0.0.1")
+```
+
+This obeys the precedence rules established above; the search for the path
+will cascade through the remaining configuration registries until found.
+
+For example, given this configuration file, both `datastore.metric.host` and
+`datastore.metric.port` are already defined (and may be overridden). If in addition
+`datastore.metric.protocol` was defined in the defaults, Viper would also find it.
+
+However, if `datastore.metric` was overridden (by a flag, an environment variable,
+the `Set()` method, …) with an immediate value, then all sub-keys of
+`datastore.metric` become undefined; they are “shadowed” by the higher-priority
+configuration level.
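+
+A minimal sketch of that shadowing (the values are illustrative):
+
+```go
+viper.SetDefault("datastore.metric.protocol", "http")
+
+viper.Set("datastore.metric", "opentsdb") // immediate value at a higher-priority level
+
+viper.Get("datastore.metric.protocol") // nil: shadowed by the override above
+```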
+
+Viper can access array indices by using numbers in the path. For example:
+
+```json
+{
+ "host": {
+ "address": "localhost",
+ "ports": [
+ 5799,
+ 6029
+ ]
+ },
+ "datastore": {
+ "metric": {
+ "host": "127.0.0.1",
+ "port": 3099
+ },
+ "warehouse": {
+ "host": "198.0.0.1",
+ "port": 2112
+ }
+ }
+}
+```
+
+```go
+GetInt("host.ports.1") // returns 6029
+```
+
+Lastly, if there exists a key that matches the delimited key path, its value
+will be returned instead. E.g.
+
+```json
+{
+ "datastore.metric.host": "0.0.0.0",
+ "host": {
+ "address": "localhost",
+ "port": 5799
+ },
+ "datastore": {
+ "metric": {
+ "host": "127.0.0.1",
+ "port": 3099
+ },
+ "warehouse": {
+ "host": "198.0.0.1",
+ "port": 2112
+ }
+ }
+}
+```
+
+```go
+GetString("datastore.metric.host") // returns "0.0.0.0"
+```
+
+### Extracting a sub-tree
+
+When developing reusable modules, it's often useful to extract a subset of the configuration
+and pass it to a module. This way the module can be instantiated more than once, with different configurations.
+
+For example, an application might use multiple different cache stores for different purposes:
+
+```yaml
+cache:
+ cache1:
+ max-items: 100
+ item-size: 64
+ cache2:
+ max-items: 200
+ item-size: 80
+```
+
+We could pass the cache name to a module (eg. `NewCache("cache1")`),
+but it would require awkward concatenation when accessing config keys and would be less decoupled from the global config.
+
+So instead of doing that, let's pass a Viper instance to the constructor that represents a subset of the configuration:
+
+```go
+cache1Config := viper.Sub("cache.cache1")
+if cache1Config == nil { // Sub returns nil if the key cannot be found
+ panic("cache configuration not found")
+}
+
+cache1 := NewCache(cache1Config)
+```
+
+**Note:** Always check the return value of `Sub`. It returns `nil` if a key cannot be found.
+
+Internally, the `NewCache` function can address `max-items` and `item-size` keys directly:
+
+```go
+func NewCache(v *Viper) *Cache {
+ return &Cache{
+ MaxItems: v.GetInt("max-items"),
+ ItemSize: v.GetInt("item-size"),
+ }
+}
+```
+
+The resulting code is easy to test, since it's decoupled from the main config structure,
+and easier to reuse (for the same reason).
+
+
+### Unmarshaling
+
+You also have the option of unmarshaling all values, or a specific value, into a struct, map,
+etc.
+
+There are two methods to do this:
+
+ * `Unmarshal(rawVal interface{}) : error`
+ * `UnmarshalKey(key string, rawVal interface{}) : error`
+
+Example:
+
+```go
+type config struct {
+ Port int
+ Name string
+ PathMap string `mapstructure:"path_map"`
+}
+
+var C config
+
+err := viper.Unmarshal(&C)
+if err != nil {
+ t.Fatalf("unable to decode into struct, %v", err)
+}
+```
+
+If you want to unmarshal configuration where the keys themselves contain a dot (the default key delimiter),
+you have to change the delimiter:
+
+```go
+v := viper.NewWithOptions(viper.KeyDelimiter("::"))
+
+v.SetDefault("chart::values", map[string]interface{}{
+ "ingress": map[string]interface{}{
+ "annotations": map[string]interface{}{
+ "traefik.frontend.rule.type": "PathPrefix",
+ "traefik.ingress.kubernetes.io/ssl-redirect": "true",
+ },
+ },
+})
+
+type config struct {
+ Chart struct{
+ Values map[string]interface{}
+ }
+}
+
+var C config
+
+v.Unmarshal(&C)
+```
+
+Viper also supports unmarshaling into embedded structs:
+
+```go
+/*
+Example config:
+
+module:
+ enabled: true
+ token: 89h3f98hbwf987h3f98wenf89ehf
+*/
+type config struct {
+ Module struct {
+ Enabled bool
+
+ moduleConfig `mapstructure:",squash"`
+ }
+}
+
+// moduleConfig could be in a module specific package
+type moduleConfig struct {
+ Token string
+}
+
+var C config
+
+err := viper.Unmarshal(&C)
+if err != nil {
+ t.Fatalf("unable to decode into struct, %v", err)
+}
+```
+
+Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values, which uses `mapstructure` tags by default.
+
+### Decoding custom formats
+
+A frequently requested feature for Viper is adding more value formats and decoders.
+For example, parsing character-separated strings (dot, comma, semicolon, etc.) into slices.
+
+This is already available in Viper using mapstructure decode hooks.
+
+Read more about the details in [this blog post](https://sagikazarmark.hu/blog/decoding-custom-formats-with-viper/).
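+
+As a brief sketch (using an illustrative semicolon-separated `tags` value), a custom hook can be passed to `Unmarshal` via `DecodeHook`:
+
+```go
+// requires github.com/mitchellh/mapstructure
+viper.Set("tags", "go;config;cli")
+
+type config struct {
+	Tags []string
+}
+
+var C config
+
+err := viper.Unmarshal(&C, viper.DecodeHook(mapstructure.ComposeDecodeHookFunc(
+	mapstructure.StringToTimeDurationHookFunc(), // keep the default duration parsing
+	mapstructure.StringToSliceHookFunc(";"),     // split on ";" instead of ","
+)))
+// C.Tags == []string{"go", "config", "cli"}
+```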
+
+### Marshalling to string
+
+You may need to marshal all the settings held in viper into a string rather than write them to a file.
+You can use your favorite format's marshaller with the config returned by `AllSettings()`.
+
+```go
+import (
+ yaml "gopkg.in/yaml.v2"
+ // ...
+)
+
+func yamlStringSettings() string {
+ c := viper.AllSettings()
+ bs, err := yaml.Marshal(c)
+ if err != nil {
+ log.Fatalf("unable to marshal config to YAML: %v", err)
+ }
+ return string(bs)
+}
+```
+
+## Viper or Vipers?
+
+Viper comes ready to use out of the box. There is no configuration or
+initialization needed to begin using Viper. Since most applications will want
+to use a single central repository for their configuration, the viper package
+provides this. It is similar to a singleton.
+
+All of the examples above demonstrate using viper in its singleton
+style approach.
+
+### Working with multiple vipers
+
+You can also create many different vipers for use in your application. Each will
+have its own unique set of configurations and values. Each can read from a
+different config file, key value store, etc. All of the functions that the viper
+package supports are mirrored as methods on a viper.
+
+Example:
+
+```go
+x := viper.New()
+y := viper.New()
+
+x.SetDefault("ContentDir", "content")
+y.SetDefault("ContentDir", "foobar")
+
+//...
+```
+
+When working with multiple vipers, it is up to the user to keep track of the
+different vipers.
+
+
+## Q & A
+
+### Why is it called “Viper”?
+
+A: Viper is designed to be a [companion](http://en.wikipedia.org/wiki/Viper_(G.I._Joe))
+to [Cobra](https://github.com/spf13/cobra). While both can operate completely
+independently, together they make a powerful pair to handle much of your
+application foundation needs.
+
+### Why is it called “Cobra”?
+
+Is there a better name for a [commander](http://en.wikipedia.org/wiki/Cobra_Commander)?
+
+### Does Viper support case sensitive keys?
+
+**tl;dr:** No.
+
+Viper merges configuration from various sources, many of which are either case insensitive or use different casing than the rest of the sources (eg. env vars).
+In order to provide the best experience when using multiple sources, the decision has been made to make all keys case insensitive.
+
+There have been several attempts to implement case sensitivity, but unfortunately it's not that trivial. We might take a stab at implementing it in [Viper v2](https://github.com/spf13/viper/issues/772), but despite the initial noise, it does not seem to be requested that much.
+
+You can vote for case sensitivity by filling out this feedback form: https://forms.gle/R6faU74qPRPAzchZ9
+
+### Is it safe to concurrently read and write to a viper?
+
+No, you will need to synchronize access to the viper yourself (for example by using the `sync` package). Concurrent reads and writes can cause a panic.
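+
+One possible approach (a sketch, not something Viper provides) is to guard a Viper instance with your own lock:
+
+```go
+type safeConfig struct {
+	mu sync.RWMutex
+	v  *viper.Viper
+}
+
+func (c *safeConfig) Set(key string, value interface{}) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.v.Set(key, value)
+}
+
+func (c *safeConfig) GetString(key string) string {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.v.GetString(key)
+}
+```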
+
+## Troubleshooting
+
+See [TROUBLESHOOTING.md](TROUBLESHOOTING.md).
diff --git a/vendor/github.com/spf13/viper/TROUBLESHOOTING.md b/vendor/github.com/spf13/viper/TROUBLESHOOTING.md
new file mode 100644
index 000000000..096277af7
--- /dev/null
+++ b/vendor/github.com/spf13/viper/TROUBLESHOOTING.md
@@ -0,0 +1,23 @@
+# Troubleshooting
+
+## Unmarshaling doesn't work
+
+The most common reason for this issue is improper use of struct tags (eg. `yaml` or `json`). Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values which uses `mapstructure` tags by default. Please refer to the library's documentation for using other struct tags.
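+
+For example (an illustrative sketch), a `yaml` tag alone is not picked up when the key differs from the field name:
+
+```go
+type serverConfig struct {
+	ListenAddress string `yaml:"listen_address"`      // wrong: Viper's Unmarshal ignores yaml tags
+	Port          int    `mapstructure:"listen_port"` // right: mapstructure tags are used
+}
+```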
+
+## Cannot find package
+
+Viper installation seems to fail a lot lately with the following (or a similar) error:
+
+```
+cannot find package "github.com/hashicorp/hcl/tree/hcl1" in any of:
+/usr/local/Cellar/go/1.15.7_1/libexec/src/github.com/hashicorp/hcl/tree/hcl1 (from $GOROOT)
+/Users/user/go/src/github.com/hashicorp/hcl/tree/hcl1 (from $GOPATH)
+```
+
+As the error message suggests, Go tries to look up dependencies in `GOPATH` mode (as it's commonly called) from the `GOPATH`.
+Viper opted to use [Go Modules](https://github.com/golang/go/wiki/Modules) to manage its dependencies. While in many cases the two methods are interchangeable, once a dependency releases new (major) versions, `GOPATH` mode is no longer able to decide which version to use, so it'll either use one that's already present or pick a version (usually the `master` branch).
+
+The solution is easy: switch to using Go Modules.
+Please refer to the [wiki](https://github.com/golang/go/wiki/Modules) on how to do that.
+
+**tl;dr:** `export GO111MODULE=on`
diff --git a/vendor/github.com/spf13/viper/flags.go b/vendor/github.com/spf13/viper/flags.go
new file mode 100644
index 000000000..b5ddbf5d4
--- /dev/null
+++ b/vendor/github.com/spf13/viper/flags.go
@@ -0,0 +1,57 @@
+package viper
+
+import "github.com/spf13/pflag"
+
+// FlagValueSet is an interface that users can implement
+// to bind a set of flags to viper.
+type FlagValueSet interface {
+ VisitAll(fn func(FlagValue))
+}
+
+// FlagValue is an interface that users can implement
+// to bind different flags to viper.
+type FlagValue interface {
+ HasChanged() bool
+ Name() string
+ ValueString() string
+ ValueType() string
+}
+
+// pflagValueSet is a wrapper around *pflag.FlagSet
+// that implements FlagValueSet.
+type pflagValueSet struct {
+ flags *pflag.FlagSet
+}
+
+// VisitAll iterates over all *pflag.Flag inside the *pflag.FlagSet.
+func (p pflagValueSet) VisitAll(fn func(flag FlagValue)) {
+ p.flags.VisitAll(func(flag *pflag.Flag) {
+ fn(pflagValue{flag})
+ })
+}
+
+// pflagValue is a wrapper around *pflag.Flag
+// that implements FlagValue.
+type pflagValue struct {
+ flag *pflag.Flag
+}
+
+// HasChanged returns whether the flag has changed or not.
+func (p pflagValue) HasChanged() bool {
+ return p.flag.Changed
+}
+
+// Name returns the name of the flag.
+func (p pflagValue) Name() string {
+ return p.flag.Name
+}
+
+// ValueString returns the value of the flag as a string.
+func (p pflagValue) ValueString() string {
+ return p.flag.Value.String()
+}
+
+// ValueType returns the type of the flag as a string.
+func (p pflagValue) ValueType() string {
+ return p.flag.Value.Type()
+}
diff --git a/vendor/github.com/spf13/viper/internal/encoding/decoder.go b/vendor/github.com/spf13/viper/internal/encoding/decoder.go
new file mode 100644
index 000000000..08b1bb66b
--- /dev/null
+++ b/vendor/github.com/spf13/viper/internal/encoding/decoder.go
@@ -0,0 +1,61 @@
+package encoding
+
+import (
+ "sync"
+)
+
+// Decoder decodes the contents of b into a v representation.
+// It's primarily used for decoding contents of a file into a map[string]interface{}.
+type Decoder interface {
+ Decode(b []byte, v interface{}) error
+}
+
+const (
+ // ErrDecoderNotFound is returned when there is no decoder registered for a format.
+ ErrDecoderNotFound = encodingError("decoder not found for this format")
+
+	// ErrDecoderFormatAlreadyRegistered is returned when a decoder is already registered for a format.
+ ErrDecoderFormatAlreadyRegistered = encodingError("decoder already registered for this format")
+)
+
+// DecoderRegistry can choose an appropriate Decoder based on the provided format.
+type DecoderRegistry struct {
+ decoders map[string]Decoder
+
+ mu sync.RWMutex
+}
+
+// NewDecoderRegistry returns a new, initialized DecoderRegistry.
+func NewDecoderRegistry() *DecoderRegistry {
+ return &DecoderRegistry{
+ decoders: make(map[string]Decoder),
+ }
+}
+
+// RegisterDecoder registers a Decoder for a format.
+// Registering a Decoder for an already existing format is not supported.
+func (e *DecoderRegistry) RegisterDecoder(format string, enc Decoder) error {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+
+ if _, ok := e.decoders[format]; ok {
+ return ErrDecoderFormatAlreadyRegistered
+ }
+
+ e.decoders[format] = enc
+
+ return nil
+}
+
+// Decode calls the underlying Decoder based on the format.
+func (e *DecoderRegistry) Decode(format string, b []byte, v interface{}) error {
+ e.mu.RLock()
+ decoder, ok := e.decoders[format]
+ e.mu.RUnlock()
+
+ if !ok {
+ return ErrDecoderNotFound
+ }
+
+ return decoder.Decode(b, v)
+}
diff --git a/vendor/github.com/spf13/viper/internal/encoding/encoder.go b/vendor/github.com/spf13/viper/internal/encoding/encoder.go
new file mode 100644
index 000000000..82c7996cb
--- /dev/null
+++ b/vendor/github.com/spf13/viper/internal/encoding/encoder.go
@@ -0,0 +1,60 @@
+package encoding
+
+import (
+ "sync"
+)
+
+// Encoder encodes the contents of v into a byte representation.
+// It's primarily used for encoding a map[string]interface{} into a file format.
+type Encoder interface {
+ Encode(v interface{}) ([]byte, error)
+}
+
+const (
+ // ErrEncoderNotFound is returned when there is no encoder registered for a format.
+ ErrEncoderNotFound = encodingError("encoder not found for this format")
+
+ // ErrEncoderFormatAlreadyRegistered is returned when an encoder is already registered for a format.
+ ErrEncoderFormatAlreadyRegistered = encodingError("encoder already registered for this format")
+)
+
+// EncoderRegistry can choose an appropriate Encoder based on the provided format.
+type EncoderRegistry struct {
+ encoders map[string]Encoder
+
+ mu sync.RWMutex
+}
+
+// NewEncoderRegistry returns a new, initialized EncoderRegistry.
+func NewEncoderRegistry() *EncoderRegistry {
+ return &EncoderRegistry{
+ encoders: make(map[string]Encoder),
+ }
+}
+
+// RegisterEncoder registers an Encoder for a format.
+// Registering an Encoder for an already existing format is not supported.
+func (e *EncoderRegistry) RegisterEncoder(format string, enc Encoder) error {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+
+ if _, ok := e.encoders[format]; ok {
+ return ErrEncoderFormatAlreadyRegistered
+ }
+
+ e.encoders[format] = enc
+
+ return nil
+}
+
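+// Encode calls the underlying Encoder based on the format.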
+func (e *EncoderRegistry) Encode(format string, v interface{}) ([]byte, error) {
+ e.mu.RLock()
+ encoder, ok := e.encoders[format]
+ e.mu.RUnlock()
+
+ if !ok {
+ return nil, ErrEncoderNotFound
+ }
+
+ return encoder.Encode(v)
+}
diff --git a/vendor/github.com/spf13/viper/internal/encoding/error.go b/vendor/github.com/spf13/viper/internal/encoding/error.go
new file mode 100644
index 000000000..e4cde02d7
--- /dev/null
+++ b/vendor/github.com/spf13/viper/internal/encoding/error.go
@@ -0,0 +1,7 @@
+package encoding
+
+type encodingError string
+
+func (e encodingError) Error() string {
+ return string(e)
+}
diff --git a/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go b/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go
new file mode 100644
index 000000000..f3e4ab122
--- /dev/null
+++ b/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go
@@ -0,0 +1,40 @@
+package hcl
+
+import (
+ "bytes"
+ "encoding/json"
+
+ "github.com/hashicorp/hcl"
+ "github.com/hashicorp/hcl/hcl/printer"
+)
+
+// Codec implements the encoding.Encoder and encoding.Decoder interfaces for HCL encoding.
+// TODO: add printer config to the codec?
+type Codec struct{}
+
+func (Codec) Encode(v interface{}) ([]byte, error) {
+ b, err := json.Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: use printer.Format? Is the trailing newline an issue?
+
+ ast, err := hcl.Parse(string(b))
+ if err != nil {
+ return nil, err
+ }
+
+ var buf bytes.Buffer
+
+ err = printer.Fprint(&buf, ast.Node)
+ if err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+}
+
+func (Codec) Decode(b []byte, v interface{}) error {
+ return hcl.Unmarshal(b, v)
+}
diff --git a/vendor/github.com/spf13/viper/internal/encoding/json/codec.go b/vendor/github.com/spf13/viper/internal/encoding/json/codec.go
new file mode 100644
index 000000000..dff9ec982
--- /dev/null
+++ b/vendor/github.com/spf13/viper/internal/encoding/json/codec.go
@@ -0,0 +1,17 @@
+package json
+
+import (
+ "encoding/json"
+)
+
+// Codec implements the encoding.Encoder and encoding.Decoder interfaces for JSON encoding.
+type Codec struct{}
+
+func (Codec) Encode(v interface{}) ([]byte, error) {
+ // TODO: expose prefix and indent in the Codec as setting?
+ return json.MarshalIndent(v, "", " ")
+}
+
+func (Codec) Decode(b []byte, v interface{}) error {
+ return json.Unmarshal(b, v)
+}
diff --git a/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go b/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go
new file mode 100644
index 000000000..c043802b9
--- /dev/null
+++ b/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go
@@ -0,0 +1,45 @@
+package toml
+
+import (
+ "github.com/pelletier/go-toml"
+)
+
+// Codec implements the encoding.Encoder and encoding.Decoder interfaces for TOML encoding.
+type Codec struct{}
+
+func (Codec) Encode(v interface{}) ([]byte, error) {
+ if m, ok := v.(map[string]interface{}); ok {
+ t, err := toml.TreeFromMap(m)
+ if err != nil {
+ return nil, err
+ }
+
+ s, err := t.ToTomlString()
+ if err != nil {
+ return nil, err
+ }
+
+ return []byte(s), nil
+ }
+
+ return toml.Marshal(v)
+}
+
+func (Codec) Decode(b []byte, v interface{}) error {
+ tree, err := toml.LoadBytes(b)
+ if err != nil {
+ return err
+ }
+
+ if m, ok := v.(*map[string]interface{}); ok {
+ vmap := *m
+ tmap := tree.ToMap()
+ for k, v := range tmap {
+ vmap[k] = v
+ }
+
+ return nil
+ }
+
+ return tree.Unmarshal(v)
+}
diff --git a/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go b/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go
new file mode 100644
index 000000000..f94b26996
--- /dev/null
+++ b/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go
@@ -0,0 +1,14 @@
+package yaml
+
+import "gopkg.in/yaml.v2"
+
+// Codec implements the encoding.Encoder and encoding.Decoder interfaces for YAML encoding.
+type Codec struct{}
+
+func (Codec) Encode(v interface{}) ([]byte, error) {
+ return yaml.Marshal(v)
+}
+
+func (Codec) Decode(b []byte, v interface{}) error {
+ return yaml.Unmarshal(b, v)
+}
diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go
new file mode 100644
index 000000000..09d051a22
--- /dev/null
+++ b/vendor/github.com/spf13/viper/util.go
@@ -0,0 +1,218 @@
+// Copyright © 2014 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Viper is an application configuration system.
+// It believes that applications can be configured a variety of ways
+// via flags, ENVIRONMENT variables, configuration files retrieved
+// from the file system, or a remote key/value store.
+
+package viper
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "unicode"
+
+ "github.com/spf13/afero"
+ "github.com/spf13/cast"
+ jww "github.com/spf13/jwalterweatherman"
+)
+
+// ConfigParseError denotes failing to parse configuration file.
+type ConfigParseError struct {
+ err error
+}
+
+// Error returns the formatted configuration error.
+func (pe ConfigParseError) Error() string {
+ return fmt.Sprintf("While parsing config: %s", pe.err.Error())
+}
+
+// toCaseInsensitiveValue checks if the value is a map;
+// if so, it creates a copy and lower-cases the keys recursively.
+func toCaseInsensitiveValue(value interface{}) interface{} {
+ switch v := value.(type) {
+ case map[interface{}]interface{}:
+ value = copyAndInsensitiviseMap(cast.ToStringMap(v))
+ case map[string]interface{}:
+ value = copyAndInsensitiviseMap(v)
+ }
+
+ return value
+}
+
+// copyAndInsensitiviseMap behaves like insensitiviseMap, but creates a copy of
+// any map it makes case insensitive.
+func copyAndInsensitiviseMap(m map[string]interface{}) map[string]interface{} {
+ nm := make(map[string]interface{})
+
+ for key, val := range m {
+ lkey := strings.ToLower(key)
+ switch v := val.(type) {
+ case map[interface{}]interface{}:
+ nm[lkey] = copyAndInsensitiviseMap(cast.ToStringMap(v))
+ case map[string]interface{}:
+ nm[lkey] = copyAndInsensitiviseMap(v)
+ default:
+ nm[lkey] = v
+ }
+ }
+
+ return nm
+}
+
+func insensitiviseMap(m map[string]interface{}) {
+ for key, val := range m {
+ switch val.(type) {
+ case map[interface{}]interface{}:
+ // nested map: cast and recursively insensitivise
+ val = cast.ToStringMap(val)
+ insensitiviseMap(val.(map[string]interface{}))
+ case map[string]interface{}:
+ // nested map: recursively insensitivise
+ insensitiviseMap(val.(map[string]interface{}))
+ }
+
+ lower := strings.ToLower(key)
+ if key != lower {
+ // remove old key (not lower-cased)
+ delete(m, key)
+ }
+ // update map
+ m[lower] = val
+ }
+}
+
+func absPathify(inPath string) string {
+ jww.INFO.Println("Trying to resolve absolute path to", inPath)
+
+ if inPath == "$HOME" || strings.HasPrefix(inPath, "$HOME"+string(os.PathSeparator)) {
+ inPath = userHomeDir() + inPath[5:]
+ }
+
+ inPath = os.ExpandEnv(inPath)
+
+ if filepath.IsAbs(inPath) {
+ return filepath.Clean(inPath)
+ }
+
+ p, err := filepath.Abs(inPath)
+ if err == nil {
+ return filepath.Clean(p)
+ }
+
+ jww.ERROR.Println("Couldn't discover absolute path")
+ jww.ERROR.Println(err)
+ return ""
+}
+
+// exists checks whether the given path exists and is a regular file (not a directory).
+func exists(fs afero.Fs, path string) (bool, error) {
+ stat, err := fs.Stat(path)
+ if err == nil {
+ return !stat.IsDir(), nil
+ }
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+}
+
+func stringInSlice(a string, list []string) bool {
+ for _, b := range list {
+ if b == a {
+ return true
+ }
+ }
+ return false
+}
+
+func userHomeDir() string {
+ if runtime.GOOS == "windows" {
+ home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
+ if home == "" {
+ home = os.Getenv("USERPROFILE")
+ }
+ return home
+ }
+ return os.Getenv("HOME")
+}
+
+func safeMul(a, b uint) uint {
+ c := a * b
+ if a > 1 && b > 1 && c/b != a {
+ return 0
+ }
+ return c
+}
+
+// parseSizeInBytes converts strings like 1GB or 12 mb into an unsigned integer number of bytes
+func parseSizeInBytes(sizeStr string) uint {
+ sizeStr = strings.TrimSpace(sizeStr)
+ lastChar := len(sizeStr) - 1
+ multiplier := uint(1)
+
+ if lastChar > 0 {
+ if sizeStr[lastChar] == 'b' || sizeStr[lastChar] == 'B' {
+ if lastChar > 1 {
+ switch unicode.ToLower(rune(sizeStr[lastChar-1])) {
+ case 'k':
+ multiplier = 1 << 10
+ sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
+ case 'm':
+ multiplier = 1 << 20
+ sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
+ case 'g':
+ multiplier = 1 << 30
+ sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
+ default:
+ multiplier = 1
+ sizeStr = strings.TrimSpace(sizeStr[:lastChar])
+ }
+ }
+ }
+ }
+
+ size := cast.ToInt(sizeStr)
+ if size < 0 {
+ size = 0
+ }
+
+ return safeMul(uint(size), multiplier)
+}
+
+// deepSearch scans deep maps, following the key indexes listed in the
+// sequence "path".
+// The last value is expected to be another map, and is returned.
+//
+// In case intermediate keys do not exist, or map to a non-map value,
+// a new map is created and inserted, and the search continues from there:
+// the initial map "m" may be modified!
+func deepSearch(m map[string]interface{}, path []string) map[string]interface{} {
+ for _, k := range path {
+ m2, ok := m[k]
+ if !ok {
+ // intermediate key does not exist
+ // => create it and continue from there
+ m3 := make(map[string]interface{})
+ m[k] = m3
+ m = m3
+ continue
+ }
+ m3, ok := m2.(map[string]interface{})
+ if !ok {
+ // intermediate key is a value
+ // => replace with a new map
+ m3 = make(map[string]interface{})
+ m[k] = m3
+ }
+ // continue search from here
+ m = m3
+ }
+ return m
+}
diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go
new file mode 100644
index 000000000..9e2e3537f
--- /dev/null
+++ b/vendor/github.com/spf13/viper/viper.go
@@ -0,0 +1,2156 @@
+// Copyright © 2014 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Viper is an application configuration system.
+// It believes that applications can be configured a variety of ways
+// via flags, ENVIRONMENT variables, configuration files retrieved
+// from the file system, or a remote key/value store.
+
+// Each item takes precedence over the item below it:
+
+// overrides
+// flag
+// env
+// config
+// key/value store
+// default
+
+package viper
+
+import (
+ "bytes"
+ "encoding/csv"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/fsnotify/fsnotify"
+ "github.com/magiconair/properties"
+ "github.com/mitchellh/mapstructure"
+ "github.com/spf13/afero"
+ "github.com/spf13/cast"
+ jww "github.com/spf13/jwalterweatherman"
+ "github.com/spf13/pflag"
+ "github.com/subosito/gotenv"
+ "gopkg.in/ini.v1"
+
+ "github.com/spf13/viper/internal/encoding"
+ "github.com/spf13/viper/internal/encoding/hcl"
+ "github.com/spf13/viper/internal/encoding/json"
+ "github.com/spf13/viper/internal/encoding/toml"
+ "github.com/spf13/viper/internal/encoding/yaml"
+)
+
+// ConfigMarshalError happens when failing to marshal the configuration.
+type ConfigMarshalError struct {
+ err error
+}
+
+// Error returns the formatted configuration error.
+func (e ConfigMarshalError) Error() string {
+ return fmt.Sprintf("While marshaling config: %s", e.err.Error())
+}
+
+var v *Viper
+
+type RemoteResponse struct {
+ Value []byte
+ Error error
+}
+
+var (
+ encoderRegistry = encoding.NewEncoderRegistry()
+ decoderRegistry = encoding.NewDecoderRegistry()
+)
+
+func init() {
+ v = New()
+
+ {
+ codec := yaml.Codec{}
+
+ encoderRegistry.RegisterEncoder("yaml", codec)
+ decoderRegistry.RegisterDecoder("yaml", codec)
+
+ encoderRegistry.RegisterEncoder("yml", codec)
+ decoderRegistry.RegisterDecoder("yml", codec)
+ }
+
+ {
+ codec := json.Codec{}
+
+ encoderRegistry.RegisterEncoder("json", codec)
+ decoderRegistry.RegisterDecoder("json", codec)
+ }
+
+ {
+ codec := toml.Codec{}
+
+ encoderRegistry.RegisterEncoder("toml", codec)
+ decoderRegistry.RegisterDecoder("toml", codec)
+ }
+
+ {
+ codec := hcl.Codec{}
+
+ encoderRegistry.RegisterEncoder("hcl", codec)
+ decoderRegistry.RegisterDecoder("hcl", codec)
+
+ encoderRegistry.RegisterEncoder("tfvars", codec)
+ decoderRegistry.RegisterDecoder("tfvars", codec)
+ }
+}
+
+type remoteConfigFactory interface {
+ Get(rp RemoteProvider) (io.Reader, error)
+ Watch(rp RemoteProvider) (io.Reader, error)
+ WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool)
+}
+
+// RemoteConfig is optional, see the remote package
+var RemoteConfig remoteConfigFactory
+
+// UnsupportedConfigError denotes encountering an unsupported
+// configuration filetype.
+type UnsupportedConfigError string
+
+// Error returns the formatted configuration error.
+func (str UnsupportedConfigError) Error() string {
+ return fmt.Sprintf("Unsupported Config Type %q", string(str))
+}
+
+// UnsupportedRemoteProviderError denotes encountering an unsupported remote
+// provider. Currently only etcd, Consul and Firestore are supported.
+type UnsupportedRemoteProviderError string
+
+// Error returns the formatted remote provider error.
+func (str UnsupportedRemoteProviderError) Error() string {
+ return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str))
+}
+
+// RemoteConfigError denotes encountering an error while trying to
+// pull the configuration from the remote provider.
+type RemoteConfigError string
+
+// Error returns the formatted remote provider error
+func (rce RemoteConfigError) Error() string {
+ return fmt.Sprintf("Remote Configurations Error: %s", string(rce))
+}
+
+// ConfigFileNotFoundError denotes failing to find configuration file.
+type ConfigFileNotFoundError struct {
+ name, locations string
+}
+
+// Error returns the formatted configuration error.
+func (fnfe ConfigFileNotFoundError) Error() string {
+ return fmt.Sprintf("Config File %q Not Found in %q", fnfe.name, fnfe.locations)
+}
+
+// ConfigFileAlreadyExistsError denotes failure to write new configuration file.
+type ConfigFileAlreadyExistsError string
+
+// Error returns the formatted error when configuration already exists.
+func (faee ConfigFileAlreadyExistsError) Error() string {
+ return fmt.Sprintf("Config File %q Already Exists", string(faee))
+}
+
+// A DecoderConfigOption can be passed to viper.Unmarshal to configure
+// mapstructure.DecoderConfig options
+type DecoderConfigOption func(*mapstructure.DecoderConfig)
+
+// DecodeHook returns a DecoderConfigOption which overrides the default
+// DecoderConfig.DecodeHook value, the default is:
+//
+// mapstructure.ComposeDecodeHookFunc(
+// mapstructure.StringToTimeDurationHookFunc(),
+// mapstructure.StringToSliceHookFunc(","),
+// )
+func DecodeHook(hook mapstructure.DecodeHookFunc) DecoderConfigOption {
+ return func(c *mapstructure.DecoderConfig) {
+ c.DecodeHook = hook
+ }
+}
+
+// Viper is a prioritized configuration registry. It
+// maintains a set of configuration sources, fetches
+// values to populate those, and provides them according
+// to the source's priority.
+// The priority of the sources is the following:
+// 1. overrides
+// 2. flags
+// 3. env. variables
+// 4. config file
+// 5. key/value store
+// 6. defaults
+//
+// For example, if values from the following sources were loaded:
+//
+// Defaults : {
+// "secret": "",
+// "user": "default",
+// "endpoint": "https://localhost"
+// }
+// Config : {
+// "user": "root"
+// "secret": "defaultsecret"
+// }
+// Env : {
+// "secret": "somesecretkey"
+// }
+//
+// The resulting config will have the following values:
+//
+// {
+// "secret": "somesecretkey",
+// "user": "root",
+// "endpoint": "https://localhost"
+// }
+//
+// Note: Vipers are not safe for concurrent Get() and Set() operations.
+type Viper struct {
+ // Delimiter that separates a list of keys
+ // used to access a nested value in one go
+ keyDelim string
+
+ // A set of paths to look for the config file in
+ configPaths []string
+
+ // The filesystem to read config from.
+ fs afero.Fs
+
+ // A set of remote providers to search for the configuration
+ remoteProviders []*defaultRemoteProvider
+
+ // Name of file to look for inside the path
+ configName string
+ configFile string
+ configType string
+ configPermissions os.FileMode
+ envPrefix string
+
+ // Specific commands for ini parsing
+ iniLoadOptions ini.LoadOptions
+
+ automaticEnvApplied bool
+ envKeyReplacer StringReplacer
+ allowEmptyEnv bool
+
+ config map[string]interface{}
+ override map[string]interface{}
+ defaults map[string]interface{}
+ kvstore map[string]interface{}
+ pflags map[string]FlagValue
+ env map[string][]string
+ aliases map[string]string
+ typeByDefValue bool
+
+ // Store read properties on the object so that we can write back in order with comments.
+ // This will only be used if the configuration read is a properties file.
+ properties *properties.Properties
+
+ onConfigChange func(fsnotify.Event)
+}
+
+// New returns an initialized Viper instance.
+func New() *Viper {
+ v := new(Viper)
+ v.keyDelim = "."
+ v.configName = "config"
+ v.configPermissions = os.FileMode(0644)
+ v.fs = afero.NewOsFs()
+ v.config = make(map[string]interface{})
+ v.override = make(map[string]interface{})
+ v.defaults = make(map[string]interface{})
+ v.kvstore = make(map[string]interface{})
+ v.pflags = make(map[string]FlagValue)
+ v.env = make(map[string][]string)
+ v.aliases = make(map[string]string)
+ v.typeByDefValue = false
+
+ return v
+}
+
+// Option configures Viper using the functional options paradigm popularized by Rob Pike and Dave Cheney.
+// If you're unfamiliar with this style,
+// see https://commandcenter.blogspot.com/2014/01/self-referential-functions-and-design.html and
+// https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis.
+type Option interface {
+ apply(v *Viper)
+}
+
+type optionFunc func(v *Viper)
+
+func (fn optionFunc) apply(v *Viper) {
+ fn(v)
+}
+
+// KeyDelimiter sets the delimiter used for determining key parts.
+// By default its value is ".".
+func KeyDelimiter(d string) Option {
+ return optionFunc(func(v *Viper) {
+ v.keyDelim = d
+ })
+}
+
+// StringReplacer applies a set of replacements to a string.
+type StringReplacer interface {
+ // Replace returns a copy of s with all replacements performed.
+ Replace(s string) string
+}
+
+// EnvKeyReplacer sets a replacer used for mapping environment variables to internal keys.
+func EnvKeyReplacer(r StringReplacer) Option {
+ return optionFunc(func(v *Viper) {
+ v.envKeyReplacer = r
+ })
+}
+
+// NewWithOptions creates a new Viper instance.
+func NewWithOptions(opts ...Option) *Viper {
+ v := New()
+
+ for _, opt := range opts {
+ opt.apply(v)
+ }
+
+ return v
+}
+
+// Reset is intended for testing; it resets all settings to their defaults.
+// It is in the public interface of the viper package so applications
+// can use it in their testing as well.
+func Reset() {
+ v = New()
+ SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"}
+ SupportedRemoteProviders = []string{"etcd", "consul", "firestore"}
+}
+
+type defaultRemoteProvider struct {
+ provider string
+ endpoint string
+ path string
+ secretKeyring string
+}
+
+func (rp defaultRemoteProvider) Provider() string {
+ return rp.provider
+}
+
+func (rp defaultRemoteProvider) Endpoint() string {
+ return rp.endpoint
+}
+
+func (rp defaultRemoteProvider) Path() string {
+ return rp.path
+}
+
+func (rp defaultRemoteProvider) SecretKeyring() string {
+ return rp.secretKeyring
+}
+
+// RemoteProvider stores the configuration necessary
+// to connect to a remote key/value store.
+// An optional secretKeyring to decrypt encrypted values
+// can be provided.
+type RemoteProvider interface {
+ Provider() string
+ Endpoint() string
+ Path() string
+ SecretKeyring() string
+}
+
+// SupportedExts are universally supported extensions.
+var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"}
+
+// SupportedRemoteProviders are universally supported remote providers.
+var SupportedRemoteProviders = []string{"etcd", "consul", "firestore"}
+
+func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) }
+func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) {
+ v.onConfigChange = run
+}
+
+func WatchConfig() { v.WatchConfig() }
+
+func (v *Viper) WatchConfig() {
+ initWG := sync.WaitGroup{}
+ initWG.Add(1)
+ go func() {
+ watcher, err := newWatcher()
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer watcher.Close()
+ // we have to watch the entire directory to pick up renames/atomic saves in a cross-platform way
+ filename, err := v.getConfigFile()
+ if err != nil {
+ log.Printf("error: %v\n", err)
+ initWG.Done()
+ return
+ }
+
+ configFile := filepath.Clean(filename)
+ configDir, _ := filepath.Split(configFile)
+ realConfigFile, _ := filepath.EvalSymlinks(filename)
+
+ eventsWG := sync.WaitGroup{}
+ eventsWG.Add(1)
+ go func() {
+ for {
+ select {
+ case event, ok := <-watcher.Events:
+ if !ok { // 'Events' channel is closed
+ eventsWG.Done()
+ return
+ }
+ currentConfigFile, _ := filepath.EvalSymlinks(filename)
+ // we only care about the config file with the following cases:
+ // 1 - if the config file was modified or created
+ // 2 - if the real path to the config file changed (eg: k8s ConfigMap replacement)
+ const writeOrCreateMask = fsnotify.Write | fsnotify.Create
+ if (filepath.Clean(event.Name) == configFile &&
+ event.Op&writeOrCreateMask != 0) ||
+ (currentConfigFile != "" && currentConfigFile != realConfigFile) {
+ realConfigFile = currentConfigFile
+ err := v.ReadInConfig()
+ if err != nil {
+ log.Printf("error reading config file: %v\n", err)
+ }
+ if v.onConfigChange != nil {
+ v.onConfigChange(event)
+ }
+ } else if filepath.Clean(event.Name) == configFile &&
+ event.Op&fsnotify.Remove&fsnotify.Remove != 0 {
+ eventsWG.Done()
+ return
+ }
+
+ case err, ok := <-watcher.Errors:
+ if ok { // 'Errors' channel is not closed
+ log.Printf("watcher error: %v\n", err)
+ }
+ eventsWG.Done()
+ return
+ }
+ }
+ }()
+ watcher.Add(configDir)
+ initWG.Done() // done initializing the watch in this go routine, so the parent routine can move on...
+ eventsWG.Wait() // now, wait for event loop to end in this go-routine...
+ }()
+ initWG.Wait() // make sure that the go routine above fully ended before returning
+}
+
+// SetConfigFile explicitly defines the path, name and extension of the config file.
+// Viper will use this and not check any of the config paths.
+func SetConfigFile(in string) { v.SetConfigFile(in) }
+
+func (v *Viper) SetConfigFile(in string) {
+ if in != "" {
+ v.configFile = in
+ }
+}
+
+// SetEnvPrefix defines a prefix that ENVIRONMENT variables will use.
+// E.g. if your prefix is "spf", the env registry will look for env
+// variables that start with "SPF_".
+func SetEnvPrefix(in string) { v.SetEnvPrefix(in) }
+
+func (v *Viper) SetEnvPrefix(in string) {
+ if in != "" {
+ v.envPrefix = in
+ }
+}
+
+func (v *Viper) mergeWithEnvPrefix(in string) string {
+ if v.envPrefix != "" {
+ return strings.ToUpper(v.envPrefix + "_" + in)
+ }
+
+ return strings.ToUpper(in)
+}
+
+// AllowEmptyEnv tells Viper to consider set,
+// but empty environment variables as valid values instead of falling back.
+// For backward compatibility reasons this is false by default.
+func AllowEmptyEnv(allowEmptyEnv bool) { v.AllowEmptyEnv(allowEmptyEnv) }
+
+func (v *Viper) AllowEmptyEnv(allowEmptyEnv bool) {
+ v.allowEmptyEnv = allowEmptyEnv
+}
+
+// TODO: should getEnv logic be moved into find(). Can generalize the use of
+// rewriting keys to many things, Ex: Get('someKey') -> some_key
+// (camel case to snake case for JSON keys perhaps)
+
+// getEnv is a wrapper around os.Getenv which replaces characters in the original
+// key. This allows env vars which have different keys than the config object
+// keys.
+func (v *Viper) getEnv(key string) (string, bool) {
+ if v.envKeyReplacer != nil {
+ key = v.envKeyReplacer.Replace(key)
+ }
+
+ val, ok := os.LookupEnv(key)
+
+ return val, ok && (v.allowEmptyEnv || val != "")
+}
+
+// ConfigFileUsed returns the file used to populate the config registry.
+func ConfigFileUsed() string { return v.ConfigFileUsed() }
+func (v *Viper) ConfigFileUsed() string { return v.configFile }
+
+// AddConfigPath adds a path for Viper to search for the config file in.
+// Can be called multiple times to define multiple search paths.
+func AddConfigPath(in string) { v.AddConfigPath(in) }
+
+func (v *Viper) AddConfigPath(in string) {
+ if in != "" {
+ absin := absPathify(in)
+ jww.INFO.Println("adding", absin, "to paths to search")
+ if !stringInSlice(absin, v.configPaths) {
+ v.configPaths = append(v.configPaths, absin)
+ }
+ }
+}
+
+// AddRemoteProvider adds a remote configuration source.
+// Remote Providers are searched in the order they are added.
+// provider is a string value: "etcd", "consul" or "firestore" are currently supported.
+// endpoint is the url. etcd requires http://ip:port consul requires ip:port
+// path is the path in the k/v store to retrieve configuration
+// To retrieve a config file called myapp.json from /configs/myapp.json
+// you should set path to /configs and set config name (SetConfigName()) to
+// "myapp"
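+//
+// Illustrative usage (a sketch; the endpoint, path, and name are example values):
+//
+//    if err := AddRemoteProvider("etcd", "http://127.0.0.1:4001", "/configs"); err != nil {
+//        // handle the unsupported-provider error
+//    }
+//    SetConfigType("json")  // format of the remote payload
+//    SetConfigName("myapp") // retrieves /configs/myapp as described above
+//    // then call ReadRemoteConfig() to load the values into the key/value store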
+func AddRemoteProvider(provider, endpoint, path string) error {
+ return v.AddRemoteProvider(provider, endpoint, path)
+}
+
+func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error {
+ if !stringInSlice(provider, SupportedRemoteProviders) {
+ return UnsupportedRemoteProviderError(provider)
+ }
+ if provider != "" && endpoint != "" {
+ jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint)
+ rp := &defaultRemoteProvider{
+ endpoint: endpoint,
+ provider: provider,
+ path: path,
+ }
+ if !v.providerPathExists(rp) {
+ v.remoteProviders = append(v.remoteProviders, rp)
+ }
+ }
+ return nil
+}
+
+// AddSecureRemoteProvider adds a remote configuration source.
+// Secure Remote Providers are searched in the order they are added.
+// provider is a string value: "etcd", "consul" or "firestore" are currently supported.
+// endpoint is the url. etcd requires http://ip:port consul requires ip:port
+// secretkeyring is the filepath to your openpgp secret keyring. e.g. /etc/secrets/myring.gpg
+// path is the path in the k/v store to retrieve configuration
+// To retrieve a config file called myapp.json from /configs/myapp.json
+// you should set path to /configs and set config name (SetConfigName()) to
+// "myapp"
+// Secure Remote Providers are implemented with github.com/bketelsen/crypt
+func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error {
+ return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring)
+}
+
+func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error {
+ if !stringInSlice(provider, SupportedRemoteProviders) {
+ return UnsupportedRemoteProviderError(provider)
+ }
+ if provider != "" && endpoint != "" {
+ jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint)
+ rp := &defaultRemoteProvider{
+ endpoint: endpoint,
+ provider: provider,
+ path: path,
+ secretKeyring: secretkeyring,
+ }
+ if !v.providerPathExists(rp) {
+ v.remoteProviders = append(v.remoteProviders, rp)
+ }
+ }
+ return nil
+}
+
+func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool {
+ for _, y := range v.remoteProviders {
+ if reflect.DeepEqual(y, p) {
+ return true
+ }
+ }
+ return false
+}
+
+// searchMap recursively searches for a value for path in source map.
+// Returns nil if not found.
+// Note: This assumes that the path entries and map keys are lower cased.
+func (v *Viper) searchMap(source map[string]interface{}, path []string) interface{} {
+ if len(path) == 0 {
+ return source
+ }
+
+ next, ok := source[path[0]]
+ if ok {
+ // Fast path
+ if len(path) == 1 {
+ return next
+ }
+
+ // Nested case
+ switch next.(type) {
+ case map[interface{}]interface{}:
+ return v.searchMap(cast.ToStringMap(next), path[1:])
+ case map[string]interface{}:
+ // Type assertion is safe here since it is only reached
+ // if the type of `next` is the same as the type being asserted
+ return v.searchMap(next.(map[string]interface{}), path[1:])
+ default:
+ // got a value but nested key expected, return "nil" for not found
+ return nil
+ }
+ }
+ return nil
+}
+
+// searchIndexableWithPathPrefixes recursively searches for a value for path in source map/slice.
+//
+// While searchMap() considers each path element as a single map key or slice index, this
+// function searches for, and prioritizes, merged path elements.
+// e.g., if in the source, "foo" is defined with a sub-key "bar", and "foo.bar"
+// is also defined, this latter value is returned for path ["foo", "bar"].
+//
+// This should be useful only at config level (other maps may not contain dots
+// in their keys).
+//
+// Note: This assumes that the path entries and map keys are lower cased.
+func (v *Viper) searchIndexableWithPathPrefixes(source interface{}, path []string) interface{} {
+ if len(path) == 0 {
+ return source
+ }
+
+ // search for path prefixes, starting from the longest one
+ for i := len(path); i > 0; i-- {
+ prefixKey := strings.ToLower(strings.Join(path[0:i], v.keyDelim))
+
+ var val interface{}
+ switch sourceIndexable := source.(type) {
+ case []interface{}:
+ val = v.searchSliceWithPathPrefixes(sourceIndexable, prefixKey, i, path)
+ case map[string]interface{}:
+ val = v.searchMapWithPathPrefixes(sourceIndexable, prefixKey, i, path)
+ }
+ if val != nil {
+ return val
+ }
+ }
+
+ // not found
+ return nil
+}
+
+// searchSliceWithPathPrefixes searches for a value for path in sourceSlice
+//
+// This function is part of the searchIndexableWithPathPrefixes recurring search and
+// should not be called directly from functions other than searchIndexableWithPathPrefixes.
+func (v *Viper) searchSliceWithPathPrefixes(
+ sourceSlice []interface{},
+ prefixKey string,
+ pathIndex int,
+ path []string,
+) interface{} {
+ // if the prefixKey is not a number or it is out of bounds of the slice
+ index, err := strconv.Atoi(prefixKey)
+ if err != nil || len(sourceSlice) <= index {
+ return nil
+ }
+
+ next := sourceSlice[index]
+
+ // Fast path
+ if pathIndex == len(path) {
+ return next
+ }
+
+ switch n := next.(type) {
+ case map[interface{}]interface{}:
+ return v.searchIndexableWithPathPrefixes(cast.ToStringMap(n), path[pathIndex:])
+ case map[string]interface{}, []interface{}:
+ return v.searchIndexableWithPathPrefixes(n, path[pathIndex:])
+ default:
+ // got a value but nested key expected, do nothing and look for next prefix
+ }
+
+ // not found
+ return nil
+}
+
+// searchMapWithPathPrefixes searches for a value for path in sourceMap
+//
+// This function is part of the searchIndexableWithPathPrefixes recurring search and
+// should not be called directly from functions other than searchIndexableWithPathPrefixes.
+func (v *Viper) searchMapWithPathPrefixes(
+ sourceMap map[string]interface{},
+ prefixKey string,
+ pathIndex int,
+ path []string,
+) interface{} {
+ next, ok := sourceMap[prefixKey]
+ if !ok {
+ return nil
+ }
+
+ // Fast path
+ if pathIndex == len(path) {
+ return next
+ }
+
+ // Nested case
+ switch n := next.(type) {
+ case map[interface{}]interface{}:
+ return v.searchIndexableWithPathPrefixes(cast.ToStringMap(n), path[pathIndex:])
+ case map[string]interface{}, []interface{}:
+ return v.searchIndexableWithPathPrefixes(n, path[pathIndex:])
+ default:
+ // got a value but nested key expected, do nothing and look for next prefix
+ }
+
+ // not found
+ return nil
+}
+
+// isPathShadowedInDeepMap makes sure the given path is not shadowed somewhere
+// on its path in the map.
+// e.g., if "foo.bar" has a value in the given map, it “shadows”
+// "foo.bar.baz" in a lower-priority map
+func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) string {
+ var parentVal interface{}
+ for i := 1; i < len(path); i++ {
+ parentVal = v.searchMap(m, path[0:i])
+ if parentVal == nil {
+ // not found, no need to add more path elements
+ return ""
+ }
+ switch parentVal.(type) {
+ case map[interface{}]interface{}:
+ continue
+ case map[string]interface{}:
+ continue
+ default:
+ // parentVal is a regular value which shadows "path"
+ return strings.Join(path[0:i], v.keyDelim)
+ }
+ }
+ return ""
+}
+
+// isPathShadowedInFlatMap makes sure the given path is not shadowed somewhere
+// in a sub-path of the map.
+// e.g., if "foo.bar" has a value in the given map, it “shadows”
+// "foo.bar.baz" in a lower-priority map
+func (v *Viper) isPathShadowedInFlatMap(path []string, mi interface{}) string {
+ // unify input map
+ var m map[string]interface{}
+ switch mi.(type) {
+ case map[string]string, map[string]FlagValue:
+ m = cast.ToStringMap(mi)
+ default:
+ return ""
+ }
+
+ // scan paths
+ var parentKey string
+ for i := 1; i < len(path); i++ {
+ parentKey = strings.Join(path[0:i], v.keyDelim)
+ if _, ok := m[parentKey]; ok {
+ return parentKey
+ }
+ }
+ return ""
+}
+
+// isPathShadowedInAutoEnv makes sure the given path is not shadowed somewhere
+// in the environment, when automatic env is on.
+// e.g., if "foo.bar" has a value in the environment, it “shadows”
+// "foo.bar.baz" in a lower-priority map
+func (v *Viper) isPathShadowedInAutoEnv(path []string) string {
+ var parentKey string
+ for i := 1; i < len(path); i++ {
+ parentKey = strings.Join(path[0:i], v.keyDelim)
+ if _, ok := v.getEnv(v.mergeWithEnvPrefix(parentKey)); ok {
+ return parentKey
+ }
+ }
+ return ""
+}
+
+// SetTypeByDefaultValue enables or disables the inference of a key value's
+// type when the Get function is used based upon a key's default value as
+// opposed to the value returned based on the normal fetch logic.
+//
+// For example, if a key has a default value of []string{} and the same key
+// is set via an environment variable to "a b c", a call to the Get function
+// would return a string slice for the key if the key's type is inferred by
+// the default value and the Get function would return:
+//
+// []string {"a", "b", "c"}
+//
+// Otherwise the Get function would return:
+//
+// "a b c"
+func SetTypeByDefaultValue(enable bool) { v.SetTypeByDefaultValue(enable) }
+
+func (v *Viper) SetTypeByDefaultValue(enable bool) {
+ v.typeByDefValue = enable
+}
+
+// GetViper gets the global Viper instance.
+func GetViper() *Viper {
+ return v
+}
+
+// Get can retrieve any value given the key to use.
+// Get is case-insensitive for a key.
+// Get has the behavior of returning the value associated with the first
+// place from where it is set. Viper will check in the following order:
+// override, flag, env, config file, key/value store, default
+//
+// Get returns an interface. For a specific value use one of the Get____ methods.
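+//
+// Illustrative usage (a sketch; "port" is an example key):
+//
+//    SetDefault("port", 8080)
+//    fmt.Println(Get("port")) // 8080 unless a flag, env var, config file, or Set() overrides it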
+func Get(key string) interface{} { return v.Get(key) }
+
+func (v *Viper) Get(key string) interface{} {
+ lcaseKey := strings.ToLower(key)
+ val := v.find(lcaseKey, true)
+ if val == nil {
+ return nil
+ }
+
+ if v.typeByDefValue {
+ // TODO(bep) this branch isn't covered by a single test.
+ valType := val
+ path := strings.Split(lcaseKey, v.keyDelim)
+ defVal := v.searchMap(v.defaults, path)
+ if defVal != nil {
+ valType = defVal
+ }
+
+ switch valType.(type) {
+ case bool:
+ return cast.ToBool(val)
+ case string:
+ return cast.ToString(val)
+ case int32, int16, int8, int:
+ return cast.ToInt(val)
+ case uint:
+ return cast.ToUint(val)
+ case uint32:
+ return cast.ToUint32(val)
+ case uint64:
+ return cast.ToUint64(val)
+ case int64:
+ return cast.ToInt64(val)
+ case float64, float32:
+ return cast.ToFloat64(val)
+ case time.Time:
+ return cast.ToTime(val)
+ case time.Duration:
+ return cast.ToDuration(val)
+ case []string:
+ return cast.ToStringSlice(val)
+ case []int:
+ return cast.ToIntSlice(val)
+ }
+ }
+
+ return val
+}
+
+// Sub returns new Viper instance representing a sub tree of this instance.
+// Sub is case-insensitive for a key.
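+//
+// Illustrative sketch, assuming the config contains a nested "cache" map:
+//
+//    if cache := Sub("cache"); cache != nil { // nil if "cache" is unset or not a map
+//        fmt.Println(cache.GetInt("max-items"))
+//    }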
+func Sub(key string) *Viper { return v.Sub(key) }
+
+func (v *Viper) Sub(key string) *Viper {
+ subv := New()
+ data := v.Get(key)
+ if data == nil {
+ return nil
+ }
+
+ if reflect.TypeOf(data).Kind() == reflect.Map {
+ subv.config = cast.ToStringMap(data)
+ return subv
+ }
+ return nil
+}
+
+// GetString returns the value associated with the key as a string.
+func GetString(key string) string { return v.GetString(key) }
+
+func (v *Viper) GetString(key string) string {
+ return cast.ToString(v.Get(key))
+}
+
+// GetBool returns the value associated with the key as a boolean.
+func GetBool(key string) bool { return v.GetBool(key) }
+
+func (v *Viper) GetBool(key string) bool {
+ return cast.ToBool(v.Get(key))
+}
+
+// GetInt returns the value associated with the key as an integer.
+func GetInt(key string) int { return v.GetInt(key) }
+
+func (v *Viper) GetInt(key string) int {
+ return cast.ToInt(v.Get(key))
+}
+
+// GetInt32 returns the value associated with the key as an integer.
+func GetInt32(key string) int32 { return v.GetInt32(key) }
+
+func (v *Viper) GetInt32(key string) int32 {
+ return cast.ToInt32(v.Get(key))
+}
+
+// GetInt64 returns the value associated with the key as an integer.
+func GetInt64(key string) int64 { return v.GetInt64(key) }
+
+func (v *Viper) GetInt64(key string) int64 {
+ return cast.ToInt64(v.Get(key))
+}
+
+// GetUint returns the value associated with the key as an unsigned integer.
+func GetUint(key string) uint { return v.GetUint(key) }
+
+func (v *Viper) GetUint(key string) uint {
+ return cast.ToUint(v.Get(key))
+}
+
+// GetUint32 returns the value associated with the key as an unsigned integer.
+func GetUint32(key string) uint32 { return v.GetUint32(key) }
+
+func (v *Viper) GetUint32(key string) uint32 {
+ return cast.ToUint32(v.Get(key))
+}
+
+// GetUint64 returns the value associated with the key as an unsigned integer.
+func GetUint64(key string) uint64 { return v.GetUint64(key) }
+
+func (v *Viper) GetUint64(key string) uint64 {
+ return cast.ToUint64(v.Get(key))
+}
+
+// GetFloat64 returns the value associated with the key as a float64.
+func GetFloat64(key string) float64 { return v.GetFloat64(key) }
+
+func (v *Viper) GetFloat64(key string) float64 {
+ return cast.ToFloat64(v.Get(key))
+}
+
+// GetTime returns the value associated with the key as time.
+func GetTime(key string) time.Time { return v.GetTime(key) }
+
+func (v *Viper) GetTime(key string) time.Time {
+ return cast.ToTime(v.Get(key))
+}
+
+// GetDuration returns the value associated with the key as a duration.
+func GetDuration(key string) time.Duration { return v.GetDuration(key) }
+
+func (v *Viper) GetDuration(key string) time.Duration {
+ return cast.ToDuration(v.Get(key))
+}
+
+// GetIntSlice returns the value associated with the key as a slice of int values.
+func GetIntSlice(key string) []int { return v.GetIntSlice(key) }
+
+func (v *Viper) GetIntSlice(key string) []int {
+ return cast.ToIntSlice(v.Get(key))
+}
+
+// GetStringSlice returns the value associated with the key as a slice of strings.
+func GetStringSlice(key string) []string { return v.GetStringSlice(key) }
+
+func (v *Viper) GetStringSlice(key string) []string {
+ return cast.ToStringSlice(v.Get(key))
+}
+
+// GetStringMap returns the value associated with the key as a map of interfaces.
+func GetStringMap(key string) map[string]interface{} { return v.GetStringMap(key) }
+
+func (v *Viper) GetStringMap(key string) map[string]interface{} {
+ return cast.ToStringMap(v.Get(key))
+}
+
+// GetStringMapString returns the value associated with the key as a map of strings.
+func GetStringMapString(key string) map[string]string { return v.GetStringMapString(key) }
+
+func (v *Viper) GetStringMapString(key string) map[string]string {
+ return cast.ToStringMapString(v.Get(key))
+}
+
+// GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings.
+func GetStringMapStringSlice(key string) map[string][]string { return v.GetStringMapStringSlice(key) }
+
+func (v *Viper) GetStringMapStringSlice(key string) map[string][]string {
+ return cast.ToStringMapStringSlice(v.Get(key))
+}
+
+// GetSizeInBytes returns the size of the value associated with the given key
+// in bytes.
+func GetSizeInBytes(key string) uint { return v.GetSizeInBytes(key) }
+
+func (v *Viper) GetSizeInBytes(key string) uint {
+ sizeStr := cast.ToString(v.Get(key))
+ return parseSizeInBytes(sizeStr)
+}
+
+// UnmarshalKey takes a single key and unmarshals it into a Struct.
+func UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error {
+ return v.UnmarshalKey(key, rawVal, opts...)
+}
+
+func (v *Viper) UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error {
+ return decode(v.Get(key), defaultDecoderConfig(rawVal, opts...))
+}
+
+// Unmarshal unmarshals the config into a Struct. Make sure that the tags
+// on the fields of the structure are properly set.
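+//
+// Illustrative sketch (the struct and its keys are example values; the default
+// decoder config uses mapstructure tags):
+//
+//    type serverConfig struct {
+//        Host string `mapstructure:"host"`
+//        Port int    `mapstructure:"port"`
+//    }
+//    var c serverConfig
+//    if err := Unmarshal(&c); err != nil {
+//        // handle the decode error
+//    }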
+func Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error {
+ return v.Unmarshal(rawVal, opts...)
+}
+
+func (v *Viper) Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error {
+ return decode(v.AllSettings(), defaultDecoderConfig(rawVal, opts...))
+}
+
+// defaultDecoderConfig returns a default mapstructure.DecoderConfig with support
+// for time.Duration values & string slices
+func defaultDecoderConfig(output interface{}, opts ...DecoderConfigOption) *mapstructure.DecoderConfig {
+ c := &mapstructure.DecoderConfig{
+ Metadata: nil,
+ Result: output,
+ WeaklyTypedInput: true,
+ DecodeHook: mapstructure.ComposeDecodeHookFunc(
+ mapstructure.StringToTimeDurationHookFunc(),
+ mapstructure.StringToSliceHookFunc(","),
+ ),
+ }
+ for _, opt := range opts {
+ opt(c)
+ }
+ return c
+}
+
+// A wrapper around mapstructure.Decode that mimics the WeakDecode functionality
+func decode(input interface{}, config *mapstructure.DecoderConfig) error {
+ decoder, err := mapstructure.NewDecoder(config)
+ if err != nil {
+ return err
+ }
+ return decoder.Decode(input)
+}
+
+// UnmarshalExact unmarshals the config into a Struct, erroring if a field is nonexistent
+// in the destination struct.
+func UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) error {
+ return v.UnmarshalExact(rawVal, opts...)
+}
+
+func (v *Viper) UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) error {
+ config := defaultDecoderConfig(rawVal, opts...)
+ config.ErrorUnused = true
+
+ return decode(v.AllSettings(), config)
+}
+
+// BindPFlags binds a full flag set to the configuration, using each flag's long
+// name as the config key.
+func BindPFlags(flags *pflag.FlagSet) error { return v.BindPFlags(flags) }
+
+func (v *Viper) BindPFlags(flags *pflag.FlagSet) error {
+ return v.BindFlagValues(pflagValueSet{flags})
+}
+
+// BindPFlag binds a specific key to a pflag (as used by cobra).
+// Example (where serverCmd is a Cobra instance):
+//
+// serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
+// Viper.BindPFlag("port", serverCmd.Flags().Lookup("port"))
+//
+func BindPFlag(key string, flag *pflag.Flag) error { return v.BindPFlag(key, flag) }
+
+func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error {
+ if flag == nil {
+ return fmt.Errorf("flag for %q is nil", key)
+ }
+ return v.BindFlagValue(key, pflagValue{flag})
+}
+
+// BindFlagValues binds a full FlagValue set to the configuration, using each flag's long
+// name as the config key.
+func BindFlagValues(flags FlagValueSet) error { return v.BindFlagValues(flags) }
+
+func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) {
+ flags.VisitAll(func(flag FlagValue) {
+ if err = v.BindFlagValue(flag.Name(), flag); err != nil {
+ return
+ }
+ })
+ return nil
+}
+
+// BindFlagValue binds a specific key to a FlagValue.
+func BindFlagValue(key string, flag FlagValue) error { return v.BindFlagValue(key, flag) }
+
+func (v *Viper) BindFlagValue(key string, flag FlagValue) error {
+ if flag == nil {
+ return fmt.Errorf("flag for %q is nil", key)
+ }
+ v.pflags[strings.ToLower(key)] = flag
+ return nil
+}
+
+// BindEnv binds a Viper key to an ENV variable.
+// ENV variables are case sensitive.
+// If only a key is provided, it will use the env key matching the key, uppercased.
+// If more arguments are provided, they will represent the env variable names that
+// should bind to this key and will be taken in the specified order.
+// EnvPrefix will be used, if set, when the env name is not provided.
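+//
+// Illustrative usage (the key and variable names are example values):
+//
+//    SetEnvPrefix("spf")
+//    BindEnv("id")               // looked up as SPF_ID
+//    BindEnv("port", "APP_PORT") // looked up as APP_PORT, exactly as given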
+func BindEnv(input ...string) error { return v.BindEnv(input...) }
+
+func (v *Viper) BindEnv(input ...string) error {
+ if len(input) == 0 {
+ return fmt.Errorf("missing key to bind to")
+ }
+
+ key := strings.ToLower(input[0])
+
+ if len(input) == 1 {
+ v.env[key] = append(v.env[key], v.mergeWithEnvPrefix(key))
+ } else {
+ v.env[key] = append(v.env[key], input[1:]...)
+ }
+
+ return nil
+}
+
+// Given a key, find the value.
+//
+// Viper will check to see if an alias exists first.
+// Viper will then check in the following order:
+// flag, env, config file, key/value store.
+// Lastly, if no value was found and flagDefault is true, and if the key
+// corresponds to a flag, the flag's default value is returned.
+//
+// Note: this assumes a lower-cased key given.
+func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} {
+ var (
+ val interface{}
+ exists bool
+ path = strings.Split(lcaseKey, v.keyDelim)
+ nested = len(path) > 1
+ )
+
+ // compute the path through the nested maps to the nested value
+ if nested && v.isPathShadowedInDeepMap(path, castMapStringToMapInterface(v.aliases)) != "" {
+ return nil
+ }
+
+ // if the requested key is an alias, then return the proper key
+ lcaseKey = v.realKey(lcaseKey)
+ path = strings.Split(lcaseKey, v.keyDelim)
+ nested = len(path) > 1
+
+ // Set() override first
+ val = v.searchMap(v.override, path)
+ if val != nil {
+ return val
+ }
+ if nested && v.isPathShadowedInDeepMap(path, v.override) != "" {
+ return nil
+ }
+
+ // PFlag override next
+ flag, exists := v.pflags[lcaseKey]
+ if exists && flag.HasChanged() {
+ switch flag.ValueType() {
+ case "int", "int8", "int16", "int32", "int64":
+ return cast.ToInt(flag.ValueString())
+ case "bool":
+ return cast.ToBool(flag.ValueString())
+ case "stringSlice", "stringArray":
+ s := strings.TrimPrefix(flag.ValueString(), "[")
+ s = strings.TrimSuffix(s, "]")
+ res, _ := readAsCSV(s)
+ return res
+ case "intSlice":
+ s := strings.TrimPrefix(flag.ValueString(), "[")
+ s = strings.TrimSuffix(s, "]")
+ res, _ := readAsCSV(s)
+ return cast.ToIntSlice(res)
+ case "stringToString":
+ return stringToStringConv(flag.ValueString())
+ default:
+ return flag.ValueString()
+ }
+ }
+ if nested && v.isPathShadowedInFlatMap(path, v.pflags) != "" {
+ return nil
+ }
+
+ // Env override next
+ if v.automaticEnvApplied {
+ // even if it hasn't been registered, if automaticEnv is used,
+ // check any Get request
+ if val, ok := v.getEnv(v.mergeWithEnvPrefix(lcaseKey)); ok {
+ return val
+ }
+ if nested && v.isPathShadowedInAutoEnv(path) != "" {
+ return nil
+ }
+ }
+ envkeys, exists := v.env[lcaseKey]
+ if exists {
+ for _, envkey := range envkeys {
+ if val, ok := v.getEnv(envkey); ok {
+ return val
+ }
+ }
+ }
+ if nested && v.isPathShadowedInFlatMap(path, v.env) != "" {
+ return nil
+ }
+
+ // Config file next
+ val = v.searchIndexableWithPathPrefixes(v.config, path)
+ if val != nil {
+ return val
+ }
+ if nested && v.isPathShadowedInDeepMap(path, v.config) != "" {
+ return nil
+ }
+
+ // K/V store next
+ val = v.searchMap(v.kvstore, path)
+ if val != nil {
+ return val
+ }
+ if nested && v.isPathShadowedInDeepMap(path, v.kvstore) != "" {
+ return nil
+ }
+
+ // Default next
+ val = v.searchMap(v.defaults, path)
+ if val != nil {
+ return val
+ }
+ if nested && v.isPathShadowedInDeepMap(path, v.defaults) != "" {
+ return nil
+ }
+
+ if flagDefault {
+ // last chance: if no value is found and a flag does exist for the key,
+ // get the flag's default value even if the flag's value has not been set.
+ if flag, exists := v.pflags[lcaseKey]; exists {
+ switch flag.ValueType() {
+ case "int", "int8", "int16", "int32", "int64":
+ return cast.ToInt(flag.ValueString())
+ case "bool":
+ return cast.ToBool(flag.ValueString())
+ case "stringSlice", "stringArray":
+ s := strings.TrimPrefix(flag.ValueString(), "[")
+ s = strings.TrimSuffix(s, "]")
+ res, _ := readAsCSV(s)
+ return res
+ case "intSlice":
+ s := strings.TrimPrefix(flag.ValueString(), "[")
+ s = strings.TrimSuffix(s, "]")
+ res, _ := readAsCSV(s)
+ return cast.ToIntSlice(res)
+ case "stringToString":
+ return stringToStringConv(flag.ValueString())
+ default:
+ return flag.ValueString()
+ }
+ }
+ // last item, no need to check shadowing
+ }
+
+ return nil
+}
+
+func readAsCSV(val string) ([]string, error) {
+ if val == "" {
+ return []string{}, nil
+ }
+ stringReader := strings.NewReader(val)
+ csvReader := csv.NewReader(stringReader)
+ return csvReader.Read()
+}
+
+// stringToStringConv is mostly copied from pflag's implementation of this operation, see https://github.com/spf13/pflag/blob/master/string_to_string.go#L79
+// Alterations: errors are swallowed, and map[string]interface{} is returned in order to enable cast.ToStringMap.
+func stringToStringConv(val string) interface{} {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+ return map[string]interface{}{}
+ }
+ r := csv.NewReader(strings.NewReader(val))
+ ss, err := r.Read()
+ if err != nil {
+ return nil
+ }
+ out := make(map[string]interface{}, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return nil
+ }
+ out[kv[0]] = kv[1]
+ }
+ return out
+}
+
+// IsSet checks to see if the key has been set in any of the data locations.
+// IsSet is case-insensitive for a key.
+func IsSet(key string) bool { return v.IsSet(key) }
+
+func (v *Viper) IsSet(key string) bool {
+ lcaseKey := strings.ToLower(key)
+ val := v.find(lcaseKey, false)
+ return val != nil
+}
+
+// AutomaticEnv makes Viper check if environment variables match any of the existing keys
+// (config, default or flags). If matching env vars are found, they are loaded into Viper.
+func AutomaticEnv() { v.AutomaticEnv() }
+
+func (v *Viper) AutomaticEnv() {
+ v.automaticEnvApplied = true
+}
+
+// SetEnvKeyReplacer sets the strings.Replacer on the viper object
+// Useful for mapping an environmental variable to a key that does
+// not match it.
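+//
+// Illustrative usage (a sketch; the key name is an example value):
+//
+//    SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
+//    // a lookup for "db-host" now checks the DB_HOST environment variable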
+func SetEnvKeyReplacer(r *strings.Replacer) { v.SetEnvKeyReplacer(r) }
+
+func (v *Viper) SetEnvKeyReplacer(r *strings.Replacer) {
+ v.envKeyReplacer = r
+}
+
+// RegisterAlias creates an alias that provides another accessor for the same key.
+// This enables one to change a name without breaking the application.
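+//
+// Illustrative usage (the key names are example values):
+//
+//    RegisterAlias("loud", "verbose")
+//    Set("verbose", true)
+//    fmt.Println(GetBool("loud")) // true, resolved through the alias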
+func RegisterAlias(alias string, key string) { v.RegisterAlias(alias, key) }
+
+func (v *Viper) RegisterAlias(alias string, key string) {
+ v.registerAlias(alias, strings.ToLower(key))
+}
+
+func (v *Viper) registerAlias(alias string, key string) {
+ alias = strings.ToLower(alias)
+ if alias != key && alias != v.realKey(key) {
+ _, exists := v.aliases[alias]
+
+ if !exists {
+ // if we alias something that exists in one of the maps to another
+ // name, we'll never be able to get that value using the original
+ // name, so move the config value to the new realkey.
+ if val, ok := v.config[alias]; ok {
+ delete(v.config, alias)
+ v.config[key] = val
+ }
+ if val, ok := v.kvstore[alias]; ok {
+ delete(v.kvstore, alias)
+ v.kvstore[key] = val
+ }
+ if val, ok := v.defaults[alias]; ok {
+ delete(v.defaults, alias)
+ v.defaults[key] = val
+ }
+ if val, ok := v.override[alias]; ok {
+ delete(v.override, alias)
+ v.override[key] = val
+ }
+ v.aliases[alias] = key
+ }
+ } else {
+ jww.WARN.Println("Creating circular reference alias", alias, key, v.realKey(key))
+ }
+}
+
+func (v *Viper) realKey(key string) string {
+ newkey, exists := v.aliases[key]
+ if exists {
+ jww.DEBUG.Println("Alias", key, "to", newkey)
+ return v.realKey(newkey)
+ }
+ return key
+}
+
+// InConfig checks to see if the given key (or an alias) is in the config file.
+func InConfig(key string) bool { return v.InConfig(key) }
+
+func (v *Viper) InConfig(key string) bool {
+ lcaseKey := strings.ToLower(key)
+
+ // if the requested key is an alias, then return the proper key
+ lcaseKey = v.realKey(lcaseKey)
+ path := strings.Split(lcaseKey, v.keyDelim)
+
+ return v.searchIndexableWithPathPrefixes(v.config, path) != nil
+}
+
+// SetDefault sets the default value for this key.
+// SetDefault is case-insensitive for a key.
+// The default is only used when no value is provided by the user via flag, config or ENV.
+func SetDefault(key string, value interface{}) { v.SetDefault(key, value) }
+
+func (v *Viper) SetDefault(key string, value interface{}) {
+ // If alias passed in, then set the proper default
+ key = v.realKey(strings.ToLower(key))
+ value = toCaseInsensitiveValue(value)
+
+ path := strings.Split(key, v.keyDelim)
+ lastKey := strings.ToLower(path[len(path)-1])
+ deepestMap := deepSearch(v.defaults, path[0:len(path)-1])
+
+ // set innermost value
+ deepestMap[lastKey] = value
+}
+
+// Set sets the value for the key in the override register.
+// Set is case-insensitive for a key.
+// Will be used instead of values obtained via
+// flags, config file, ENV, default, or key/value store.
+func Set(key string, value interface{}) { v.Set(key, value) }
+
+func (v *Viper) Set(key string, value interface{}) {
+ // If alias passed in, then set the proper override
+ key = v.realKey(strings.ToLower(key))
+ value = toCaseInsensitiveValue(value)
+
+ path := strings.Split(key, v.keyDelim)
+ lastKey := strings.ToLower(path[len(path)-1])
+ deepestMap := deepSearch(v.override, path[0:len(path)-1])
+
+ // set innermost value
+ deepestMap[lastKey] = value
+}
+
+// ReadInConfig will discover and load the configuration file from disk
+// and key/value stores, searching in one of the defined paths.
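+//
+// Typical usage (a sketch; the name and search path are example values):
+//
+//    SetConfigName("config")      // looks for config.<ext> with any supported extension
+//    AddConfigPath("/etc/myapp/") // directory to search
+//    if err := ReadInConfig(); err != nil {
+//        // handle a missing or unparseable config file
+//    }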
+func ReadInConfig() error { return v.ReadInConfig() }
+
+func (v *Viper) ReadInConfig() error {
+ jww.INFO.Println("Attempting to read in config file")
+ filename, err := v.getConfigFile()
+ if err != nil {
+ return err
+ }
+
+ if !stringInSlice(v.getConfigType(), SupportedExts) {
+ return UnsupportedConfigError(v.getConfigType())
+ }
+
+ jww.DEBUG.Println("Reading file: ", filename)
+ file, err := afero.ReadFile(v.fs, filename)
+ if err != nil {
+ return err
+ }
+
+ config := make(map[string]interface{})
+
+ err = v.unmarshalReader(bytes.NewReader(file), config)
+ if err != nil {
+ return err
+ }
+
+ v.config = config
+ return nil
+}
+
+// MergeInConfig merges a new configuration with an existing config.
+func MergeInConfig() error { return v.MergeInConfig() }
+
+func (v *Viper) MergeInConfig() error {
+ jww.INFO.Println("Attempting to merge in config file")
+ filename, err := v.getConfigFile()
+ if err != nil {
+ return err
+ }
+
+ if !stringInSlice(v.getConfigType(), SupportedExts) {
+ return UnsupportedConfigError(v.getConfigType())
+ }
+
+ file, err := afero.ReadFile(v.fs, filename)
+ if err != nil {
+ return err
+ }
+
+ return v.MergeConfig(bytes.NewReader(file))
+}
+
+// ReadConfig will read a configuration file, setting existing keys to nil if the
+// key does not exist in the file.
+func ReadConfig(in io.Reader) error { return v.ReadConfig(in) }
+
+func (v *Viper) ReadConfig(in io.Reader) error {
+ v.config = make(map[string]interface{})
+ return v.unmarshalReader(in, v.config)
+}
+
+// MergeConfig merges a new configuration with an existing config.
+func MergeConfig(in io.Reader) error { return v.MergeConfig(in) }
+
+func (v *Viper) MergeConfig(in io.Reader) error {
+ cfg := make(map[string]interface{})
+ if err := v.unmarshalReader(in, cfg); err != nil {
+ return err
+ }
+ return v.MergeConfigMap(cfg)
+}
+
+// MergeConfigMap merges the configuration from the map given with an existing config.
+// Note that the map given may be modified.
+func MergeConfigMap(cfg map[string]interface{}) error { return v.MergeConfigMap(cfg) }
+
+func (v *Viper) MergeConfigMap(cfg map[string]interface{}) error {
+ if v.config == nil {
+ v.config = make(map[string]interface{})
+ }
+ insensitiviseMap(cfg)
+ mergeMaps(cfg, v.config, nil)
+ return nil
+}
+
+// WriteConfig writes the current configuration to a file.
+func WriteConfig() error { return v.WriteConfig() }
+
+func (v *Viper) WriteConfig() error {
+ filename, err := v.getConfigFile()
+ if err != nil {
+ return err
+ }
+ return v.writeConfig(filename, true)
+}
+
+// SafeWriteConfig writes current configuration to file only if the file does not exist.
+func SafeWriteConfig() error { return v.SafeWriteConfig() }
+
+func (v *Viper) SafeWriteConfig() error {
+ if len(v.configPaths) < 1 {
+ return errors.New("missing configuration for 'configPath'")
+ }
+ return v.SafeWriteConfigAs(filepath.Join(v.configPaths[0], v.configName+"."+v.configType))
+}
+
+// WriteConfigAs writes current configuration to a given filename.
+func WriteConfigAs(filename string) error { return v.WriteConfigAs(filename) }
+
+func (v *Viper) WriteConfigAs(filename string) error {
+ return v.writeConfig(filename, true)
+}
+
+// SafeWriteConfigAs writes current configuration to a given filename if it does not exist.
+func SafeWriteConfigAs(filename string) error { return v.SafeWriteConfigAs(filename) }
+
+func (v *Viper) SafeWriteConfigAs(filename string) error {
+ alreadyExists, err := afero.Exists(v.fs, filename)
+ if alreadyExists && err == nil {
+ return ConfigFileAlreadyExistsError(filename)
+ }
+ return v.writeConfig(filename, false)
+}
+
+func (v *Viper) writeConfig(filename string, force bool) error {
+ jww.INFO.Println("Attempting to write configuration to file.")
+ var configType string
+
+ ext := filepath.Ext(filename)
+ if ext != "" && ext != filepath.Base(filename) {
+ configType = ext[1:]
+ } else {
+ configType = v.configType
+ }
+ if configType == "" {
+ return fmt.Errorf("config type could not be determined for %s", filename)
+ }
+
+ if !stringInSlice(configType, SupportedExts) {
+ return UnsupportedConfigError(configType)
+ }
+ if v.config == nil {
+ v.config = make(map[string]interface{})
+ }
+ flags := os.O_CREATE | os.O_TRUNC | os.O_WRONLY
+ if !force {
+ flags |= os.O_EXCL
+ }
+ f, err := v.fs.OpenFile(filename, flags, v.configPermissions)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ if err := v.marshalWriter(f, configType); err != nil {
+ return err
+ }
+
+ return f.Sync()
+}
+
+// Unmarshal a Reader into a map.
+// Should probably be an unexported function.
+func unmarshalReader(in io.Reader, c map[string]interface{}) error {
+ return v.unmarshalReader(in, c)
+}
+
+func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error {
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(in)
+
+ switch format := strings.ToLower(v.getConfigType()); format {
+ case "yaml", "yml", "json", "toml", "hcl", "tfvars":
+ err := decoderRegistry.Decode(format, buf.Bytes(), &c)
+ if err != nil {
+ return ConfigParseError{err}
+ }
+
+ case "dotenv", "env":
+ env, err := gotenv.StrictParse(buf)
+ if err != nil {
+ return ConfigParseError{err}
+ }
+ for k, v := range env {
+ c[k] = v
+ }
+
+ case "properties", "props", "prop":
+ v.properties = properties.NewProperties()
+ var err error
+ if v.properties, err = properties.Load(buf.Bytes(), properties.UTF8); err != nil {
+ return ConfigParseError{err}
+ }
+ for _, key := range v.properties.Keys() {
+ value, _ := v.properties.Get(key)
+ // recursively build nested maps
+ path := strings.Split(key, ".")
+ lastKey := strings.ToLower(path[len(path)-1])
+ deepestMap := deepSearch(c, path[0:len(path)-1])
+ // set innermost value
+ deepestMap[lastKey] = value
+ }
+
+ case "ini":
+ cfg := ini.Empty(v.iniLoadOptions)
+ err := cfg.Append(buf.Bytes())
+ if err != nil {
+ return ConfigParseError{err}
+ }
+ sections := cfg.Sections()
+ for i := 0; i < len(sections); i++ {
+ section := sections[i]
+ keys := section.Keys()
+ for j := 0; j < len(keys); j++ {
+ key := keys[j]
+ value := cfg.Section(section.Name()).Key(key.Name()).String()
+ c[section.Name()+"."+key.Name()] = value
+ }
+ }
+ }
+
+ insensitiviseMap(c)
+ return nil
+}
+
+// Marshal a map into Writer.
+func (v *Viper) marshalWriter(f afero.File, configType string) error {
+ c := v.AllSettings()
+ switch configType {
+ case "yaml", "yml", "json", "toml", "hcl", "tfvars":
+ b, err := encoderRegistry.Encode(configType, c)
+ if err != nil {
+ return ConfigMarshalError{err}
+ }
+
+ _, err = f.WriteString(string(b))
+ if err != nil {
+ return ConfigMarshalError{err}
+ }
+
+ case "prop", "props", "properties":
+ if v.properties == nil {
+ v.properties = properties.NewProperties()
+ }
+ p := v.properties
+ for _, key := range v.AllKeys() {
+ _, _, err := p.Set(key, v.GetString(key))
+ if err != nil {
+ return ConfigMarshalError{err}
+ }
+ }
+ _, err := p.WriteComment(f, "#", properties.UTF8)
+ if err != nil {
+ return ConfigMarshalError{err}
+ }
+
+ case "dotenv", "env":
+ lines := []string{}
+ for _, key := range v.AllKeys() {
+ envName := strings.ToUpper(strings.Replace(key, ".", "_", -1))
+ val := v.Get(key)
+ lines = append(lines, fmt.Sprintf("%v=%v", envName, val))
+ }
+ s := strings.Join(lines, "\n")
+ if _, err := f.WriteString(s); err != nil {
+ return ConfigMarshalError{err}
+ }
+
+ case "ini":
+ keys := v.AllKeys()
+ cfg := ini.Empty()
+ ini.PrettyFormat = false
+ for i := 0; i < len(keys); i++ {
+ key := keys[i]
+ lastSep := strings.LastIndex(key, ".")
+ sectionName := key[:(lastSep)]
+ keyName := key[(lastSep + 1):]
+ if sectionName == "default" {
+ sectionName = ""
+ }
+ cfg.Section(sectionName).Key(keyName).SetValue(v.GetString(key))
+ }
+ cfg.WriteTo(f)
+ }
+ return nil
+}
+
+func keyExists(k string, m map[string]interface{}) string {
+ lk := strings.ToLower(k)
+ for mk := range m {
+ lmk := strings.ToLower(mk)
+ if lmk == lk {
+ return mk
+ }
+ }
+ return ""
+}
+
+func castToMapStringInterface(
+ src map[interface{}]interface{}) map[string]interface{} {
+ tgt := map[string]interface{}{}
+ for k, v := range src {
+ tgt[fmt.Sprintf("%v", k)] = v
+ }
+ return tgt
+}
+
+func castMapStringSliceToMapInterface(src map[string][]string) map[string]interface{} {
+ tgt := map[string]interface{}{}
+ for k, v := range src {
+ tgt[k] = v
+ }
+ return tgt
+}
+
+func castMapStringToMapInterface(src map[string]string) map[string]interface{} {
+ tgt := map[string]interface{}{}
+ for k, v := range src {
+ tgt[k] = v
+ }
+ return tgt
+}
+
+func castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} {
+ tgt := map[string]interface{}{}
+ for k, v := range src {
+ tgt[k] = v
+ }
+ return tgt
+}
+
+// mergeMaps merges two maps. The `itgt` parameter is for handling go-yaml's
+// insistence on parsing nested structures as `map[interface{}]interface{}`
+// instead of using a `string` as the key for nested structures beyond one level
+// deep. Both map types are supported as there is a go-yaml fork that uses
+// `map[string]interface{}` instead.
+func mergeMaps(
+ src, tgt map[string]interface{}, itgt map[interface{}]interface{}) {
+ for sk, sv := range src {
+ tk := keyExists(sk, tgt)
+ if tk == "" {
+ jww.TRACE.Printf("tk=\"\", tgt[%s]=%v", sk, sv)
+ tgt[sk] = sv
+ if itgt != nil {
+ itgt[sk] = sv
+ }
+ continue
+ }
+
+ tv, ok := tgt[tk]
+ if !ok {
+ jww.TRACE.Printf("tgt[%s] != ok, tgt[%s]=%v", tk, sk, sv)
+ tgt[sk] = sv
+ if itgt != nil {
+ itgt[sk] = sv
+ }
+ continue
+ }
+
+ svType := reflect.TypeOf(sv)
+ tvType := reflect.TypeOf(tv)
+ if tvType != nil && svType != tvType { // Allow for the target to be nil
+ jww.ERROR.Printf(
+ "svType != tvType; key=%s, st=%v, tt=%v, sv=%v, tv=%v",
+ sk, svType, tvType, sv, tv)
+ continue
+ }
+
+ jww.TRACE.Printf("processing key=%s, st=%v, tt=%v, sv=%v, tv=%v",
+ sk, svType, tvType, sv, tv)
+
+ switch ttv := tv.(type) {
+ case map[interface{}]interface{}:
+ jww.TRACE.Printf("merging maps (must convert)")
+ tsv := sv.(map[interface{}]interface{})
+ ssv := castToMapStringInterface(tsv)
+ stv := castToMapStringInterface(ttv)
+ mergeMaps(ssv, stv, ttv)
+ case map[string]interface{}:
+ jww.TRACE.Printf("merging maps")
+ mergeMaps(sv.(map[string]interface{}), ttv, nil)
+ default:
+ jww.TRACE.Printf("setting value")
+ tgt[tk] = sv
+ if itgt != nil {
+ itgt[tk] = sv
+ }
+ }
+ }
+}
+
+// ReadRemoteConfig attempts to get configuration from a remote source
+// and read it into the remote configuration registry.
+func ReadRemoteConfig() error { return v.ReadRemoteConfig() }
+
+func (v *Viper) ReadRemoteConfig() error {
+ return v.getKeyValueConfig()
+}
+
+func WatchRemoteConfig() error { return v.WatchRemoteConfig() }
+func (v *Viper) WatchRemoteConfig() error {
+ return v.watchKeyValueConfig()
+}
+
+func (v *Viper) WatchRemoteConfigOnChannel() error {
+ return v.watchKeyValueConfigOnChannel()
+}
+
+// Retrieve the first found remote configuration.
+func (v *Viper) getKeyValueConfig() error {
+ if RemoteConfig == nil {
+ return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'")
+ }
+
+ for _, rp := range v.remoteProviders {
+ val, err := v.getRemoteConfig(rp)
+ if err != nil {
+ jww.ERROR.Printf("get remote config: %s", err)
+
+ continue
+ }
+
+ v.kvstore = val
+
+ return nil
+ }
+ return RemoteConfigError("No Files Found")
+}
+
+func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) {
+ reader, err := RemoteConfig.Get(provider)
+ if err != nil {
+ return nil, err
+ }
+ err = v.unmarshalReader(reader, v.kvstore)
+ return v.kvstore, err
+}
+
+// Retrieve the first found remote configuration.
+func (v *Viper) watchKeyValueConfigOnChannel() error {
+ for _, rp := range v.remoteProviders {
+ respc, _ := RemoteConfig.WatchChannel(rp)
+ // Todo: Add quit channel
+ go func(rc <-chan *RemoteResponse) {
+ for {
+ b := <-rc
+ reader := bytes.NewReader(b.Value)
+ v.unmarshalReader(reader, v.kvstore)
+ }
+ }(respc)
+ return nil
+ }
+ return RemoteConfigError("No Files Found")
+}
+
+// Retrieve the first found remote configuration.
+func (v *Viper) watchKeyValueConfig() error {
+ for _, rp := range v.remoteProviders {
+ val, err := v.watchRemoteConfig(rp)
+ if err != nil {
+ continue
+ }
+ v.kvstore = val
+ return nil
+ }
+ return RemoteConfigError("No Files Found")
+}
+
+func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) {
+ reader, err := RemoteConfig.Watch(provider)
+ if err != nil {
+ return nil, err
+ }
+ err = v.unmarshalReader(reader, v.kvstore)
+ return v.kvstore, err
+}
+
+// AllKeys returns all keys holding a value, regardless of where they are set.
+// Nested keys are returned with a v.keyDelim separator
+func AllKeys() []string { return v.AllKeys() }
+
+func (v *Viper) AllKeys() []string {
+ m := map[string]bool{}
+ // add all paths, by order of descending priority to ensure correct shadowing
+ m = v.flattenAndMergeMap(m, castMapStringToMapInterface(v.aliases), "")
+ m = v.flattenAndMergeMap(m, v.override, "")
+ m = v.mergeFlatMap(m, castMapFlagToMapInterface(v.pflags))
+ m = v.mergeFlatMap(m, castMapStringSliceToMapInterface(v.env))
+ m = v.flattenAndMergeMap(m, v.config, "")
+ m = v.flattenAndMergeMap(m, v.kvstore, "")
+ m = v.flattenAndMergeMap(m, v.defaults, "")
+
+ // convert set of paths to list
+ a := make([]string, 0, len(m))
+ for x := range m {
+ a = append(a, x)
+ }
+ return a
+}
+
+// flattenAndMergeMap recursively flattens the given map into a map[string]bool
+// of key paths (used as a set, easier to manipulate than a []string):
+// - each path is merged into a single key string, delimited with v.keyDelim
+// - if a path is shadowed by an earlier value in the initial shadow map,
+// it is skipped.
+// The resulting set of paths is merged to the given shadow set at the same time.
+func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interface{}, prefix string) map[string]bool {
+ if shadow != nil && prefix != "" && shadow[prefix] {
+ // prefix is shadowed => nothing more to flatten
+ return shadow
+ }
+ if shadow == nil {
+ shadow = make(map[string]bool)
+ }
+
+ var m2 map[string]interface{}
+ if prefix != "" {
+ prefix += v.keyDelim
+ }
+ for k, val := range m {
+ fullKey := prefix + k
+ switch val.(type) {
+ case map[string]interface{}:
+ m2 = val.(map[string]interface{})
+ case map[interface{}]interface{}:
+ m2 = cast.ToStringMap(val)
+ default:
+ // immediate value
+ shadow[strings.ToLower(fullKey)] = true
+ continue
+ }
+ // recursively merge to shadow map
+ shadow = v.flattenAndMergeMap(shadow, m2, fullKey)
+ }
+ return shadow
+}
+
+// mergeFlatMap merges the given maps, excluding values of the second map
+// shadowed by values from the first map.
+func (v *Viper) mergeFlatMap(shadow map[string]bool, m map[string]interface{}) map[string]bool {
+ // scan keys
+outer:
+ for k := range m {
+ path := strings.Split(k, v.keyDelim)
+ // scan intermediate paths
+ var parentKey string
+ for i := 1; i < len(path); i++ {
+ parentKey = strings.Join(path[0:i], v.keyDelim)
+ if shadow[parentKey] {
+ // path is shadowed, continue
+ continue outer
+ }
+ }
+ // add key
+ shadow[strings.ToLower(k)] = true
+ }
+ return shadow
+}
+
+// AllSettings merges all settings and returns them as a map[string]interface{}.
+func AllSettings() map[string]interface{} { return v.AllSettings() }
+
+func (v *Viper) AllSettings() map[string]interface{} {
+ m := map[string]interface{}{}
+ // start from the list of keys, and construct the map one value at a time
+ for _, k := range v.AllKeys() {
+ value := v.Get(k)
+ if value == nil {
+ // should not happen, since AllKeys() returns only keys holding a value,
+ // check just in case anything changes
+ continue
+ }
+ path := strings.Split(k, v.keyDelim)
+ lastKey := strings.ToLower(path[len(path)-1])
+ deepestMap := deepSearch(m, path[0:len(path)-1])
+ // set innermost value
+ deepestMap[lastKey] = value
+ }
+ return m
+}
+
+// SetFs sets the filesystem to use to read configuration.
+func SetFs(fs afero.Fs) { v.SetFs(fs) }
+
+func (v *Viper) SetFs(fs afero.Fs) {
+ v.fs = fs
+}
+
+// SetConfigName sets the name for the config file.
+// Does not include extension.
+func SetConfigName(in string) { v.SetConfigName(in) }
+
+func (v *Viper) SetConfigName(in string) {
+ if in != "" {
+ v.configName = in
+ v.configFile = ""
+ }
+}
+
+// SetConfigType sets the type of the configuration returned by the
+// remote source, e.g. "json".
+func SetConfigType(in string) { v.SetConfigType(in) }
+
+func (v *Viper) SetConfigType(in string) {
+ if in != "" {
+ v.configType = in
+ }
+}
+
+// SetConfigPermissions sets the permissions for the config file.
+func SetConfigPermissions(perm os.FileMode) { v.SetConfigPermissions(perm) }
+
+func (v *Viper) SetConfigPermissions(perm os.FileMode) {
+ v.configPermissions = perm.Perm()
+}
+
+// IniLoadOptions sets the load options for ini parsing.
+func IniLoadOptions(in ini.LoadOptions) Option {
+ return optionFunc(func(v *Viper) {
+ v.iniLoadOptions = in
+ })
+}
+
+func (v *Viper) getConfigType() string {
+ if v.configType != "" {
+ return v.configType
+ }
+
+ cf, err := v.getConfigFile()
+ if err != nil {
+ return ""
+ }
+
+ ext := filepath.Ext(cf)
+
+ if len(ext) > 1 {
+ return ext[1:]
+ }
+
+ return ""
+}
+
+func (v *Viper) getConfigFile() (string, error) {
+ if v.configFile == "" {
+ cf, err := v.findConfigFile()
+ if err != nil {
+ return "", err
+ }
+ v.configFile = cf
+ }
+ return v.configFile, nil
+}
+
+func (v *Viper) searchInPath(in string) (filename string) {
+ jww.DEBUG.Println("Searching for config in ", in)
+ for _, ext := range SupportedExts {
+ jww.DEBUG.Println("Checking for", filepath.Join(in, v.configName+"."+ext))
+ if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b {
+ jww.DEBUG.Println("Found: ", filepath.Join(in, v.configName+"."+ext))
+ return filepath.Join(in, v.configName+"."+ext)
+ }
+ }
+
+ if v.configType != "" {
+ if b, _ := exists(v.fs, filepath.Join(in, v.configName)); b {
+ return filepath.Join(in, v.configName)
+ }
+ }
+
+ return ""
+}
+
+// Search all configPaths for any config file.
+// Returns the first path that exists (and is a config file).
+func (v *Viper) findConfigFile() (string, error) {
+ jww.INFO.Println("Searching for config in ", v.configPaths)
+
+ for _, cp := range v.configPaths {
+ file := v.searchInPath(cp)
+ if file != "" {
+ return file, nil
+ }
+ }
+ return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)}
+}
+
+// Debug prints all configuration registries for debugging
+// purposes.
+func Debug() { v.Debug() }
+
+func (v *Viper) Debug() {
+ fmt.Printf("Aliases:\n%#v\n", v.aliases)
+ fmt.Printf("Override:\n%#v\n", v.override)
+ fmt.Printf("PFlags:\n%#v\n", v.pflags)
+ fmt.Printf("Env:\n%#v\n", v.env)
+ fmt.Printf("Key/Value Store:\n%#v\n", v.kvstore)
+ fmt.Printf("Config:\n%#v\n", v.config)
+ fmt.Printf("Defaults:\n%#v\n", v.defaults)
+}
diff --git a/vendor/github.com/spf13/viper/watch.go b/vendor/github.com/spf13/viper/watch.go
new file mode 100644
index 000000000..c433a8fa4
--- /dev/null
+++ b/vendor/github.com/spf13/viper/watch.go
@@ -0,0 +1,11 @@
+// +build !js
+
+package viper
+
+import "github.com/fsnotify/fsnotify"
+
+type watcher = fsnotify.Watcher
+
+func newWatcher() (*watcher, error) {
+ return fsnotify.NewWatcher()
+}
diff --git a/vendor/github.com/spf13/viper/watch_wasm.go b/vendor/github.com/spf13/viper/watch_wasm.go
new file mode 100644
index 000000000..8e47e6a91
--- /dev/null
+++ b/vendor/github.com/spf13/viper/watch_wasm.go
@@ -0,0 +1,30 @@
+// +build js,wasm
+
+package viper
+
+import (
+ "errors"
+
+ "github.com/fsnotify/fsnotify"
+)
+
+type watcher struct {
+ Events chan fsnotify.Event
+ Errors chan error
+}
+
+func (*watcher) Close() error {
+ return nil
+}
+
+func (*watcher) Add(name string) error {
+ return nil
+}
+
+func (*watcher) Remove(name string) error {
+ return nil
+}
+
+func newWatcher() (*watcher, error) {
+ return &watcher{}, errors.New("fsnotify is not supported on WASM")
+}
diff --git a/vendor/github.com/subosito/gotenv/.env b/vendor/github.com/subosito/gotenv/.env
new file mode 100644
index 000000000..6405eca71
--- /dev/null
+++ b/vendor/github.com/subosito/gotenv/.env
@@ -0,0 +1 @@
+HELLO=world
diff --git a/vendor/github.com/subosito/gotenv/.env.invalid b/vendor/github.com/subosito/gotenv/.env.invalid
new file mode 100644
index 000000000..016d5e0ce
--- /dev/null
+++ b/vendor/github.com/subosito/gotenv/.env.invalid
@@ -0,0 +1 @@
+lol$wut
diff --git a/vendor/github.com/subosito/gotenv/.gitignore b/vendor/github.com/subosito/gotenv/.gitignore
new file mode 100644
index 000000000..2b8d45610
--- /dev/null
+++ b/vendor/github.com/subosito/gotenv/.gitignore
@@ -0,0 +1,3 @@
+*.test
+*.out
+annotate.json
diff --git a/vendor/github.com/subosito/gotenv/.travis.yml b/vendor/github.com/subosito/gotenv/.travis.yml
new file mode 100644
index 000000000..3370d5f40
--- /dev/null
+++ b/vendor/github.com/subosito/gotenv/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+go:
+ - 1.x
+os:
+ - linux
+ - osx
+script:
+ - go test -test.v -coverprofile=coverage.out -covermode=count
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/subosito/gotenv/CHANGELOG.md b/vendor/github.com/subosito/gotenv/CHANGELOG.md
new file mode 100644
index 000000000..67f687382
--- /dev/null
+++ b/vendor/github.com/subosito/gotenv/CHANGELOG.md
@@ -0,0 +1,47 @@
+# Changelog
+
+## [1.2.0] - 2019-08-03
+
+### Added
+
+- Add `Must` helper to raise an error as panic. It can be used with `Load` and `OverLoad`.
+- Add more tests to be 100% coverage.
+- Add CHANGELOG
+- Add more OS for the test: OSX and Windows
+
+### Changed
+
+- Reduce complexity and improve source code for having `A+` score in [goreportcard](https://goreportcard.com/report/github.com/subosito/gotenv).
+- Updated README with mentions to all available functions
+
+### Removed
+
+- Remove `ErrFormat`
+- Remove `MustLoad` and `MustOverload`, replaced with `Must` helper.
+
+## [1.1.1] - 2018-06-05
+
+### Changed
+
+- Replace `os.Getenv` with `os.LookupEnv` to ensure that the environment variable is not set, by [radding](https://github.com/radding)
+
+## [1.1.0] - 2017-03-20
+
+### Added
+
+- Supports carriage return in env
+- Handle files with UTF-8 BOM
+
+### Changed
+
+- Whitespace handling
+
+### Fixed
+
+- Incorrect variable expansion
+- Handling escaped '$' characters
+
+## [1.0.0] - 2014-10-05
+
+First stable release.
+
diff --git a/vendor/github.com/subosito/gotenv/LICENSE b/vendor/github.com/subosito/gotenv/LICENSE
new file mode 100644
index 000000000..f64ccaedc
--- /dev/null
+++ b/vendor/github.com/subosito/gotenv/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Alif Rachmawadi
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/subosito/gotenv/README.md b/vendor/github.com/subosito/gotenv/README.md
new file mode 100644
index 000000000..d610cdf0b
--- /dev/null
+++ b/vendor/github.com/subosito/gotenv/README.md
@@ -0,0 +1,131 @@
+# gotenv
+
+[![Build Status](https://travis-ci.org/subosito/gotenv.svg?branch=master)](https://travis-ci.org/subosito/gotenv)
+[![Build status](https://ci.appveyor.com/api/projects/status/wb2e075xkfl0m0v2/branch/master?svg=true)](https://ci.appveyor.com/project/subosito/gotenv/branch/master)
+[![Coverage Status](https://badgen.net/codecov/c/github/subosito/gotenv)](https://codecov.io/gh/subosito/gotenv)
+[![Go Report Card](https://goreportcard.com/badge/github.com/subosito/gotenv)](https://goreportcard.com/report/github.com/subosito/gotenv)
+[![GoDoc](https://godoc.org/github.com/subosito/gotenv?status.svg)](https://godoc.org/github.com/subosito/gotenv)
+
+Load environment variables dynamically in Go.
+
+## Usage
+
+Add the gotenv package to your `import` statement:
+
+```go
+import "github.com/subosito/gotenv"
+```
+
+To modify your app's environment variables, `gotenv` exposes two main functions:
+
+- `gotenv.Load`
+- `gotenv.Apply`
+
+By default, `gotenv.Load` will look for a file called `.env` in the current working directory.
+
+Behind the scenes, it will then load the `.env` file and export the valid variables as environment variables. Make sure you call the method as early as possible to ensure it loads all variables, for example in an `init()` function.
+
+Once loaded, you can use `os.Getenv()` to get the value of a variable.
+
+Let's say you have a `.env` file:
+
+```
+APP_ID=1234567
+APP_SECRET=abcdef
+```
+
+Here's an example app:
+
+```go
+package main
+
+import (
+ "github.com/subosito/gotenv"
+ "log"
+ "os"
+)
+
+func init() {
+ gotenv.Load()
+}
+
+func main() {
+ log.Println(os.Getenv("APP_ID")) // "1234567"
+ log.Println(os.Getenv("APP_SECRET")) // "abcdef"
+}
+```
+
+You can also load files other than `.env` if you wish. Just supply the filenames when calling `Load()`. They will be loaded in order, and the first value set for a variable wins:
+
+```go
+gotenv.Load(".env.production", "credentials")
+```
+
+While `gotenv.Load` loads entries from a `.env` file, `gotenv.Apply` allows you to use any `io.Reader`:
+
+```go
+gotenv.Apply(strings.NewReader("APP_ID=1234567"))
+
+log.Println(os.Getenv("APP_ID"))
+// Output: "1234567"
+```
+
+Both `gotenv.Load` and `gotenv.Apply` **DO NOT** override existing environment variables. If you want to override existing ones, see the section below.
+
+### Environment Overrides
+
+Besides the above functions, `gotenv` also provides functions that override existing variables:
+
+- `gotenv.OverLoad`
+- `gotenv.OverApply`
+
+
+Here's an example of this override behavior:
+
+```go
+os.Setenv("HELLO", "world")
+
+// NOTE: with Apply, the existing value is preserved
+gotenv.Apply(strings.NewReader("HELLO=universe"))
+fmt.Println(os.Getenv("HELLO"))
+// Output: "world"
+
+// NOTE: with OverApply, the existing value is overridden
+gotenv.OverApply(strings.NewReader("HELLO=universe"))
+fmt.Println(os.Getenv("HELLO"))
+// Output: "universe"
+```
+
+### Throw a Panic
+
+Both `gotenv.Load` and `gotenv.OverLoad` return an error when something goes wrong, for example when the env file does not exist. To make this easier to handle, `gotenv` also provides the `gotenv.Must` helper, which panics when an error is returned.
+
+```go
+err := gotenv.Load(".env-is-not-exist")
+fmt.Println("error", err)
+// error: open .env-is-not-exist: no such file or directory
+
+gotenv.Must(gotenv.Load, ".env-is-not-exist")
+// it will throw a panic
+// panic: open .env-is-not-exist: no such file or directory
+```
+
+### Another Scenario
+
+In case you want to parse environment variables from any `io.Reader`, gotenv keeps its `Parse` and `StrictParse` functions as public API so you can use them directly.
+
+```go
+// import "strings"
+
+pairs := gotenv.Parse(strings.NewReader("FOO=test\nBAR=$FOO"))
+// gotenv.Env{"FOO": "test", "BAR": "test"}
+
+pairs, err := gotenv.StrictParse(strings.NewReader(`FOO="bar"`))
+// gotenv.Env{"FOO": "bar"}
+```
+
+`Parse` ignores invalid lines and returns `Env` of valid environment variables, while `StrictParse` returns an error for invalid lines.
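+
+For example (a short sketch), a malformed line makes `StrictParse` fail, while `Parse` still returns the valid pairs it found:
+
+```go
+_, err := gotenv.StrictParse(strings.NewReader("FOO=test\nnot a valid line"))
+fmt.Println(err) // reports the offending line
+
+pairs := gotenv.Parse(strings.NewReader("FOO=test\nnot a valid line"))
+fmt.Println(pairs["FOO"]) // "test"
+```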
+
+## Notes
+
+The gotenv package is a Go port of the [`dotenv`](https://github.com/bkeepers/dotenv) project, with some additions made for Go. For general features, it aims to be as compatible as possible.
diff --git a/vendor/github.com/subosito/gotenv/appveyor.yml b/vendor/github.com/subosito/gotenv/appveyor.yml
new file mode 100644
index 000000000..33b4c4046
--- /dev/null
+++ b/vendor/github.com/subosito/gotenv/appveyor.yml
@@ -0,0 +1,9 @@
+build: off
+clone_folder: c:\gopath\src\github.com\subosito\gotenv
+environment:
+ GOPATH: c:\gopath
+stack: go 1.10
+before_test:
+ - go get -t
+test_script:
+ - go test -v -cover -race
diff --git a/vendor/github.com/subosito/gotenv/gotenv.go b/vendor/github.com/subosito/gotenv/gotenv.go
new file mode 100644
index 000000000..745a34489
--- /dev/null
+++ b/vendor/github.com/subosito/gotenv/gotenv.go
@@ -0,0 +1,265 @@
+// Package gotenv provides functionality to dynamically load environment variables.
+package gotenv
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "regexp"
+ "strings"
+)
+
+const (
+ // Pattern for detecting valid line format
+ linePattern = `\A\s*(?:export\s+)?([\w\.]+)(?:\s*=\s*|:\s+?)('(?:\'|[^'])*'|"(?:\"|[^"])*"|[^#\n]+)?\s*(?:\s*\#.*)?\z`
+
+ // Pattern for detecting valid variable within a value
+ variablePattern = `(\\)?(\$)(\{?([A-Z0-9_]+)?\}?)`
+)
+
+// Env holds key/value pairs of valid environment variables
+type Env map[string]string
+
+/*
+Load is a function to load a file or multiple files and then export the valid variables into environment variables if they do not already exist.
+When it's called with no arguments, it will load the `.env` file in the current working directory and set the environment variables.
+Otherwise, it will loop over the given filenames and set the corresponding environment variables.
+*/
+func Load(filenames ...string) error {
+ return loadenv(false, filenames...)
+}
+
+/*
+OverLoad is a function to load a file or multiple files and then export and override the valid variables into environment variables.
+*/
+func OverLoad(filenames ...string) error {
+ return loadenv(true, filenames...)
+}
+
+/*
+Must is a wrapper function that will panic when the supplied function returns an error.
+*/
+func Must(fn func(filenames ...string) error, filenames ...string) {
+ if err := fn(filenames...); err != nil {
+ panic(err.Error())
+ }
+}
+
+/*
+Apply is a function to load from an io.Reader and then export the valid variables into environment variables if they do not already exist.
+*/
+func Apply(r io.Reader) error {
+ return parset(r, false)
+}
+
+/*
+OverApply is a function to load from an io.Reader and then export and override the valid variables into environment variables.
+*/
+func OverApply(r io.Reader) error {
+ return parset(r, true)
+}
+
+func loadenv(override bool, filenames ...string) error {
+ if len(filenames) == 0 {
+ filenames = []string{".env"}
+ }
+
+ for _, filename := range filenames {
+ f, err := os.Open(filename)
+ if err != nil {
+ return err
+ }
+
+ err = parset(f, override)
+ if err != nil {
+ return err
+ }
+
+ f.Close()
+ }
+
+ return nil
+}
+
+// parse and set :)
+func parset(r io.Reader, override bool) error {
+ env, err := StrictParse(r)
+ if err != nil {
+ return err
+ }
+
+ for key, val := range env {
+ setenv(key, val, override)
+ }
+
+ return nil
+}
+
+func setenv(key, val string, override bool) {
+ if override {
+ os.Setenv(key, val)
+ } else {
+ if _, present := os.LookupEnv(key); !present {
+ os.Setenv(key, val)
+ }
+ }
+}
+
+// Parse is a function to parse, line by line, any io.Reader supplied and return an Env of the valid key/value pairs.
+// It expands variable references from the environment but does not set any values in the environment itself.
+// This function skips any invalid lines and only processes the valid ones.
+func Parse(r io.Reader) Env {
+ env, _ := StrictParse(r)
+ return env
+}
+
+// StrictParse is a function to parse, line by line, any io.Reader supplied and return an Env of the valid key/value pairs.
+// It expands variable references from the environment but does not set any values in the environment itself.
+// This function returns an error if it encounters any invalid lines.
+func StrictParse(r io.Reader) (Env, error) {
+ env := make(Env)
+ scanner := bufio.NewScanner(r)
+
+ i := 1
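+	// UTF-8 byte order mark (0xEF 0xBB 0xBF), stripped from the first line if present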
+ bom := string([]byte{239, 187, 191})
+
+ for scanner.Scan() {
+ line := scanner.Text()
+
+ if i == 1 {
+ line = strings.TrimPrefix(line, bom)
+ }
+
+ i++
+
+ err := parseLine(line, env)
+ if err != nil {
+ return env, err
+ }
+ }
+
+ return env, nil
+}
+
+func parseLine(s string, env Env) error {
+ rl := regexp.MustCompile(linePattern)
+ rm := rl.FindStringSubmatch(s)
+
+ if len(rm) == 0 {
+ return checkFormat(s, env)
+ }
+
+ key := rm[1]
+ val := rm[2]
+
+ // determine if string has quote prefix
+ hdq := strings.HasPrefix(val, `"`)
+
+ // determine if string has single quote prefix
+ hsq := strings.HasPrefix(val, `'`)
+
+ // trim whitespace
+ val = strings.Trim(val, " ")
+
+ // remove quotes '' or ""
+ rq := regexp.MustCompile(`\A(['"])(.*)(['"])\z`)
+ val = rq.ReplaceAllString(val, "$2")
+
+ if hdq {
+ val = strings.Replace(val, `\n`, "\n", -1)
+ val = strings.Replace(val, `\r`, "\r", -1)
+
+ // Unescape all characters except $ so variables can be escaped properly
+ re := regexp.MustCompile(`\\([^$])`)
+ val = re.ReplaceAllString(val, "$1")
+ }
+
+ rv := regexp.MustCompile(variablePattern)
+ fv := func(s string) string {
+ return varReplacement(s, hsq, env)
+ }
+
+ val = rv.ReplaceAllStringFunc(val, fv)
+ val = parseVal(val, env)
+
+ env[key] = val
+ return nil
+}
+
+func parseExport(st string, env Env) error {
+ if strings.HasPrefix(st, "export") {
+ vs := strings.SplitN(st, " ", 2)
+
+ if len(vs) > 1 {
+ if _, ok := env[vs[1]]; !ok {
+ return fmt.Errorf("line `%s` has an unset variable", st)
+ }
+ }
+ }
+
+ return nil
+}
+
+func varReplacement(s string, hsq bool, env Env) string {
+ if strings.HasPrefix(s, "\\") {
+ return strings.TrimPrefix(s, "\\")
+ }
+
+ if hsq {
+ return s
+ }
+
+ sn := `(\$)(\{?([A-Z0-9_]+)\}?)`
+ rn := regexp.MustCompile(sn)
+ mn := rn.FindStringSubmatch(s)
+
+ if len(mn) == 0 {
+ return s
+ }
+
+ v := mn[3]
+
+ replace, ok := env[v]
+ if !ok {
+ replace = os.Getenv(v)
+ }
+
+ return replace
+}
+
+func checkFormat(s string, env Env) error {
+ st := strings.TrimSpace(s)
+
+ if (st == "") || strings.HasPrefix(st, "#") {
+ return nil
+ }
+
+ if err := parseExport(st, env); err != nil {
+ return err
+ }
+
+ return fmt.Errorf("line `%s` doesn't match format", s)
+}
+
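+// parseVal handles values whose expansion contains embedded newlines: the first segment is kept as the value and any remaining KEY=VALUE segments are parsed as additional assignments.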
+func parseVal(val string, env Env) string {
+ if strings.Contains(val, "=") {
+ if !(val == "\n" || val == "\r") {
+ kv := strings.Split(val, "\n")
+
+ if len(kv) == 1 {
+ kv = strings.Split(val, "\r")
+ }
+
+ if len(kv) > 1 {
+ val = kv[0]
+
+ for i := 1; i < len(kv); i++ {
+ parseLine(kv[i], env)
+ }
+ }
+ }
+ }
+
+ return val
+}
diff --git a/vendor/github.com/urfave/cli/v2/.flake8 b/vendor/github.com/urfave/cli/v2/.flake8
deleted file mode 100644
index 6deafc261..000000000
--- a/vendor/github.com/urfave/cli/v2/.flake8
+++ /dev/null
@@ -1,2 +0,0 @@
-[flake8]
-max-line-length = 120
diff --git a/vendor/github.com/urfave/cli/v2/.gitignore b/vendor/github.com/urfave/cli/v2/.gitignore
deleted file mode 100644
index 2d5e149b4..000000000
--- a/vendor/github.com/urfave/cli/v2/.gitignore
+++ /dev/null
@@ -1,7 +0,0 @@
-*.coverprofile
-*.orig
-node_modules/
-vendor
-.idea
-internal/*/built-example
-coverage.txt
diff --git a/vendor/github.com/urfave/cli/v2/CODE_OF_CONDUCT.md b/vendor/github.com/urfave/cli/v2/CODE_OF_CONDUCT.md
deleted file mode 100644
index 41ba294f6..000000000
--- a/vendor/github.com/urfave/cli/v2/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,74 +0,0 @@
-# Contributor Covenant Code of Conduct
-
-## Our Pledge
-
-In the interest of fostering an open and welcoming environment, we as
-contributors and maintainers pledge to making participation in our project and
-our community a harassment-free experience for everyone, regardless of age, body
-size, disability, ethnicity, gender identity and expression, level of experience,
-education, socio-economic status, nationality, personal appearance, race,
-religion, or sexual identity and orientation.
-
-## Our Standards
-
-Examples of behavior that contributes to creating a positive environment
-include:
-
-* Using welcoming and inclusive language
-* Being respectful of differing viewpoints and experiences
-* Gracefully accepting constructive criticism
-* Focusing on what is best for the community
-* Showing empathy towards other community members
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery and unwelcome sexual attention or
- advances
-* Trolling, insulting/derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or electronic
- address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a
- professional setting
-
-## Our Responsibilities
-
-Project maintainers are responsible for clarifying the standards of acceptable
-behavior and are expected to take appropriate and fair corrective action in
-response to any instances of unacceptable behavior.
-
-Project maintainers have the right and responsibility to remove, edit, or
-reject comments, commits, code, wiki edits, issues, and other contributions
-that are not aligned to this Code of Conduct, or to ban temporarily or
-permanently any contributor for other behaviors that they deem inappropriate,
-threatening, offensive, or harmful.
-
-## Scope
-
-This Code of Conduct applies both within project spaces and in public spaces
-when an individual is representing the project or its community. Examples of
-representing a project or community include using an official project e-mail
-address, posting via an official social media account, or acting as an appointed
-representative at an online or offline event. Representation of a project may be
-further defined and clarified by project maintainers.
-
-## Enforcement
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be
-reported by contacting Dan Buch at dan@meatballhat.com. All complaints will be
-reviewed and investigated and will result in a response that is deemed necessary
-and appropriate to the circumstances. The project team is obligated to maintain
-confidentiality with regard to the reporter of an incident. Further details of
-specific enforcement policies may be posted separately.
-
-Project maintainers who do not follow or enforce the Code of Conduct in good
-faith may face temporary or permanent repercussions as determined by other
-members of the project's leadership.
-
-## Attribution
-
-This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
-available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
-
-[homepage]: https://www.contributor-covenant.org
-
diff --git a/vendor/github.com/urfave/cli/v2/README.md b/vendor/github.com/urfave/cli/v2/README.md
deleted file mode 100644
index 408668bc3..000000000
--- a/vendor/github.com/urfave/cli/v2/README.md
+++ /dev/null
@@ -1,70 +0,0 @@
-cli
-===
-
-[![GoDoc](https://godoc.org/github.com/urfave/cli?status.svg)](https://godoc.org/github.com/urfave/cli)
-[![codebeat](https://codebeat.co/badges/0a8f30aa-f975-404b-b878-5fab3ae1cc5f)](https://codebeat.co/projects/github-com-urfave-cli)
-[![Go Report Card](https://goreportcard.com/badge/urfave/cli)](https://goreportcard.com/report/urfave/cli)
-[![codecov](https://codecov.io/gh/urfave/cli/branch/master/graph/badge.svg)](https://codecov.io/gh/urfave/cli)
-
-cli is a simple, fast, and fun package for building command line apps in Go. The
-goal is to enable developers to write fast and distributable command line
-applications in an expressive way.
-
-## Usage Documentation
-
-Usage documentation exists for each major version. Don't know what version you're on? You're probably using the version from the `master` branch, which is currently `v2`.
-
-- `v2` - [./docs/v2/manual.md](./docs/v2/manual.md)
-- `v1` - [./docs/v1/manual.md](./docs/v1/manual.md)
-
-Guides for migrating to newer versions:
-
-- `v1-to-v2` - [./docs/migrate-v1-to-v2.md](./docs/migrate-v1-to-v2.md)
-
-## Installation
-
-Using this package requires a working Go environment. [See the install instructions for Go](http://golang.org/doc/install.html).
-
-Go Modules are required when using this package. [See the go blog guide on using Go Modules](https://blog.golang.org/using-go-modules).
-
-### Using `v2` releases
-
-```
-$ GO111MODULE=on go get github.com/urfave/cli/v2
-```
-
-```go
-...
-import (
- "github.com/urfave/cli/v2" // imports as package "cli"
-)
-...
-```
-
-### Using `v1` releases
-
-```
-$ GO111MODULE=on go get github.com/urfave/cli
-```
-
-```go
-...
-import (
- "github.com/urfave/cli"
-)
-...
-```
-
-### GOPATH
-
-Make sure your `PATH` includes the `$GOPATH/bin` directory so your commands can
-be easily used:
-```
-export PATH=$PATH:$GOPATH/bin
-```
-
-### Supported platforms
-
-cli is tested against multiple versions of Go on Linux, and against the latest
-released version of Go on OS X and Windows. This project uses Github Actions for
-builds. To see our currently supported go versions and platforms, look at the [./.github/workflows/cli.yml](https://github.com/urfave/cli/blob/master/.github/workflows/cli.yml).
diff --git a/vendor/github.com/urfave/cli/v2/app.go b/vendor/github.com/urfave/cli/v2/app.go
deleted file mode 100644
index f801fce91..000000000
--- a/vendor/github.com/urfave/cli/v2/app.go
+++ /dev/null
@@ -1,540 +0,0 @@
-package cli
-
-import (
- "context"
- "flag"
- "fmt"
- "io"
- "os"
- "path/filepath"
- "sort"
- "time"
-)
-
-var (
- changeLogURL = "https://github.com/urfave/cli/blob/master/docs/CHANGELOG.md"
- appActionDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-action-signature", changeLogURL)
- contactSysadmin = "This is an error in the application. Please contact the distributor of this application if this is not you."
- errInvalidActionType = NewExitError("ERROR invalid Action type. "+
- fmt.Sprintf("Must be `func(*Context`)` or `func(*Context) error). %s", contactSysadmin)+
- fmt.Sprintf("See %s", appActionDeprecationURL), 2)
-)
-
-// App is the main structure of a cli application. It is recommended that
-// an app be created with the cli.NewApp() function
-type App struct {
- // The name of the program. Defaults to path.Base(os.Args[0])
- Name string
- // Full name of command for help, defaults to Name
- HelpName string
- // Description of the program.
- Usage string
- // Text to override the USAGE section of help
- UsageText string
- // Description of the program argument format.
- ArgsUsage string
- // Version of the program
- Version string
- // Description of the program
- Description string
- // List of commands to execute
- Commands []*Command
- // List of flags to parse
- Flags []Flag
- // Boolean to enable bash completion commands
- EnableBashCompletion bool
- // Boolean to hide built-in help command and help flag
- HideHelp bool
- // Boolean to hide built-in help command but keep help flag.
- // Ignored if HideHelp is true.
- HideHelpCommand bool
- // Boolean to hide built-in version flag and the VERSION section of help
- HideVersion bool
- // categories contains the categorized commands and is populated on app startup
- categories CommandCategories
- // An action to execute when the shell completion flag is set
- BashComplete BashCompleteFunc
- // An action to execute before any subcommands are run, but after the context is ready
- // If a non-nil error is returned, no subcommands are run
- Before BeforeFunc
- // An action to execute after any subcommands are run, but after the subcommand has finished
- // It is run even if Action() panics
- After AfterFunc
- // The action to execute when no subcommands are specified
- Action ActionFunc
- // Execute this function if the proper command cannot be found
- CommandNotFound CommandNotFoundFunc
- // Execute this function if a usage error occurs
- OnUsageError OnUsageErrorFunc
- // Compilation date
- Compiled time.Time
- // List of all authors who contributed
- Authors []*Author
- // Copyright of the binary if any
- Copyright string
- // Reader reader to write input to (useful for tests)
- Reader io.Reader
- // Writer writer to write output to
- Writer io.Writer
- // ErrWriter writes error output
- ErrWriter io.Writer
- // ExitErrHandler processes any error encountered while running an App before
- // it is returned to the caller. If no function is provided, HandleExitCoder
- // is used as the default behavior.
- ExitErrHandler ExitErrHandlerFunc
- // Other custom info
- Metadata map[string]interface{}
- // Carries a function which returns app specific info.
- ExtraInfo func() map[string]string
- // CustomAppHelpTemplate the text template for app help topic.
- // cli.go uses text/template to render templates. You can
- // render custom help text by setting this variable.
- CustomAppHelpTemplate string
- // Boolean to enable short-option handling so user can combine several
- // single-character bool arguments into one
- // i.e. foobar -o -v -> foobar -ov
- UseShortOptionHandling bool
-
- didSetup bool
-}
-
-// Tries to find out when this binary was compiled.
-// Returns the current time if it fails to find it.
-func compileTime() time.Time {
- info, err := os.Stat(os.Args[0])
- if err != nil {
- return time.Now()
- }
- return info.ModTime()
-}
-
-// NewApp creates a new cli Application with some reasonable defaults for Name,
-// Usage, Version and Action.
-func NewApp() *App {
- return &App{
- Name: filepath.Base(os.Args[0]),
- HelpName: filepath.Base(os.Args[0]),
- Usage: "A new cli application",
- UsageText: "",
- BashComplete: DefaultAppComplete,
- Action: helpCommand.Action,
- Compiled: compileTime(),
- Reader: os.Stdin,
- Writer: os.Stdout,
- ErrWriter: os.Stderr,
- }
-}
-
-// Setup runs initialization code to ensure all data structures are ready for
-// `Run` or inspection prior to `Run`. It is internally called by `Run`, but
-// will return early if setup has already happened.
-func (a *App) Setup() {
- if a.didSetup {
- return
- }
-
- a.didSetup = true
-
- if a.Name == "" {
- a.Name = filepath.Base(os.Args[0])
- }
-
- if a.HelpName == "" {
- a.HelpName = filepath.Base(os.Args[0])
- }
-
- if a.Usage == "" {
- a.Usage = "A new cli application"
- }
-
- if a.Version == "" {
- a.HideVersion = true
- }
-
- if a.BashComplete == nil {
- a.BashComplete = DefaultAppComplete
- }
-
- if a.Action == nil {
- a.Action = helpCommand.Action
- }
-
- if a.Compiled == (time.Time{}) {
- a.Compiled = compileTime()
- }
-
- if a.Reader == nil {
- a.Reader = os.Stdin
- }
-
- if a.Writer == nil {
- a.Writer = os.Stdout
- }
-
- if a.ErrWriter == nil {
- a.ErrWriter = os.Stderr
- }
-
- var newCommands []*Command
-
- for _, c := range a.Commands {
- if c.HelpName == "" {
- c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name)
- }
- newCommands = append(newCommands, c)
- }
- a.Commands = newCommands
-
- if a.Command(helpCommand.Name) == nil && !a.HideHelp {
- if !a.HideHelpCommand {
- a.appendCommand(helpCommand)
- }
-
- if HelpFlag != nil {
- a.appendFlag(HelpFlag)
- }
- }
-
- if !a.HideVersion {
- a.appendFlag(VersionFlag)
- }
-
- a.categories = newCommandCategories()
- for _, command := range a.Commands {
- a.categories.AddCommand(command.Category, command)
- }
- sort.Sort(a.categories.(*commandCategories))
-
- if a.Metadata == nil {
- a.Metadata = make(map[string]interface{})
- }
-}
-
-func (a *App) newFlagSet() (*flag.FlagSet, error) {
- return flagSet(a.Name, a.Flags)
-}
-
-func (a *App) useShortOptionHandling() bool {
- return a.UseShortOptionHandling
-}
-
-// Run is the entry point to the cli app. Parses the arguments slice and routes
-// to the proper flag/args combination
-func (a *App) Run(arguments []string) (err error) {
- return a.RunContext(context.Background(), arguments)
-}
-
-// RunContext is like Run except it takes a Context that will be
-// passed to its commands and sub-commands. Through this, you can
-// propagate timeouts and cancellation requests
-func (a *App) RunContext(ctx context.Context, arguments []string) (err error) {
- a.Setup()
-
- // handle the completion flag separately from the flagset since
- // completion could be attempted after a flag, but before its value was put
- // on the command line. this causes the flagset to interpret the completion
- // flag name as the value of the flag before it which is undesirable
- // note that we can only do this because the shell autocomplete function
- // always appends the completion flag at the end of the command
- shellComplete, arguments := checkShellCompleteFlag(a, arguments)
-
- set, err := a.newFlagSet()
- if err != nil {
- return err
- }
-
- err = parseIter(set, a, arguments[1:], shellComplete)
- nerr := normalizeFlags(a.Flags, set)
- context := NewContext(a, set, &Context{Context: ctx})
- if nerr != nil {
- _, _ = fmt.Fprintln(a.Writer, nerr)
- _ = ShowAppHelp(context)
- return nerr
- }
- context.shellComplete = shellComplete
-
- if checkCompletions(context) {
- return nil
- }
-
- if err != nil {
- if a.OnUsageError != nil {
- err := a.OnUsageError(context, err, false)
- a.handleExitCoder(context, err)
- return err
- }
- _, _ = fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error())
- _ = ShowAppHelp(context)
- return err
- }
-
- if !a.HideHelp && checkHelp(context) {
- _ = ShowAppHelp(context)
- return nil
- }
-
- if !a.HideVersion && checkVersion(context) {
- ShowVersion(context)
- return nil
- }
-
- cerr := checkRequiredFlags(a.Flags, context)
- if cerr != nil {
- _ = ShowAppHelp(context)
- return cerr
- }
-
- if a.After != nil {
- defer func() {
- if afterErr := a.After(context); afterErr != nil {
- if err != nil {
- err = newMultiError(err, afterErr)
- } else {
- err = afterErr
- }
- }
- }()
- }
-
- if a.Before != nil {
- beforeErr := a.Before(context)
- if beforeErr != nil {
- a.handleExitCoder(context, beforeErr)
- err = beforeErr
- return err
- }
- }
-
- args := context.Args()
- if args.Present() {
- name := args.First()
- c := a.Command(name)
- if c != nil {
- return c.Run(context)
- }
- }
-
- if a.Action == nil {
- a.Action = helpCommand.Action
- }
-
- // Run default Action
- err = a.Action(context)
-
- a.handleExitCoder(context, err)
- return err
-}
-
-// RunAndExitOnError calls .Run() and exits non-zero if an error was returned
-//
-// Deprecated: instead you should return an error that fulfills cli.ExitCoder
-// to cli.App.Run. This will cause the application to exit with the given eror
-// code in the cli.ExitCoder
-func (a *App) RunAndExitOnError() {
- if err := a.Run(os.Args); err != nil {
- _, _ = fmt.Fprintln(a.ErrWriter, err)
- OsExiter(1)
- }
-}
-
-// RunAsSubcommand invokes the subcommand given the context, parses ctx.Args() to
-// generate command-specific flags
-func (a *App) RunAsSubcommand(ctx *Context) (err error) {
- // Setup also handles HideHelp and HideHelpCommand
- a.Setup()
-
- var newCmds []*Command
- for _, c := range a.Commands {
- if c.HelpName == "" {
- c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name)
- }
- newCmds = append(newCmds, c)
- }
- a.Commands = newCmds
-
- set, err := a.newFlagSet()
- if err != nil {
- return err
- }
-
- err = parseIter(set, a, ctx.Args().Tail(), ctx.shellComplete)
- nerr := normalizeFlags(a.Flags, set)
- context := NewContext(a, set, ctx)
-
- if nerr != nil {
- _, _ = fmt.Fprintln(a.Writer, nerr)
- _, _ = fmt.Fprintln(a.Writer)
- if len(a.Commands) > 0 {
- _ = ShowSubcommandHelp(context)
- } else {
- _ = ShowCommandHelp(ctx, context.Args().First())
- }
- return nerr
- }
-
- if checkCompletions(context) {
- return nil
- }
-
- if err != nil {
- if a.OnUsageError != nil {
- err = a.OnUsageError(context, err, true)
- a.handleExitCoder(context, err)
- return err
- }
- _, _ = fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error())
- _ = ShowSubcommandHelp(context)
- return err
- }
-
- if len(a.Commands) > 0 {
- if checkSubcommandHelp(context) {
- return nil
- }
- } else {
- if checkCommandHelp(ctx, context.Args().First()) {
- return nil
- }
- }
-
- cerr := checkRequiredFlags(a.Flags, context)
- if cerr != nil {
- _ = ShowSubcommandHelp(context)
- return cerr
- }
-
- if a.After != nil {
- defer func() {
- afterErr := a.After(context)
- if afterErr != nil {
- a.handleExitCoder(context, err)
- if err != nil {
- err = newMultiError(err, afterErr)
- } else {
- err = afterErr
- }
- }
- }()
- }
-
- if a.Before != nil {
- beforeErr := a.Before(context)
- if beforeErr != nil {
- a.handleExitCoder(context, beforeErr)
- err = beforeErr
- return err
- }
- }
-
- args := context.Args()
- if args.Present() {
- name := args.First()
- c := a.Command(name)
- if c != nil {
- return c.Run(context)
- }
- }
-
- // Run default Action
- err = a.Action(context)
-
- a.handleExitCoder(context, err)
- return err
-}
-
-// Command returns the named command on App. Returns nil if the command does not exist
-func (a *App) Command(name string) *Command {
- for _, c := range a.Commands {
- if c.HasName(name) {
- return c
- }
- }
-
- return nil
-}
-
-// VisibleCategories returns a slice of categories and commands that are
-// Hidden=false
-func (a *App) VisibleCategories() []CommandCategory {
- ret := []CommandCategory{}
- for _, category := range a.categories.Categories() {
- if visible := func() CommandCategory {
- if len(category.VisibleCommands()) > 0 {
- return category
- }
- return nil
- }(); visible != nil {
- ret = append(ret, visible)
- }
- }
- return ret
-}
-
-// VisibleCommands returns a slice of the Commands with Hidden=false
-func (a *App) VisibleCommands() []*Command {
- var ret []*Command
- for _, command := range a.Commands {
- if !command.Hidden {
- ret = append(ret, command)
- }
- }
- return ret
-}
-
-// VisibleFlags returns a slice of the Flags with Hidden=false
-func (a *App) VisibleFlags() []Flag {
- return visibleFlags(a.Flags)
-}
-
-func (a *App) appendFlag(fl Flag) {
- if !hasFlag(a.Flags, fl) {
- a.Flags = append(a.Flags, fl)
- }
-}
-
-func (a *App) appendCommand(c *Command) {
- if !hasCommand(a.Commands, c) {
- a.Commands = append(a.Commands, c)
- }
-}
-
-func (a *App) handleExitCoder(context *Context, err error) {
- if a.ExitErrHandler != nil {
- a.ExitErrHandler(context, err)
- } else {
- HandleExitCoder(err)
- }
-}
-
-// Author represents someone who has contributed to a cli project.
-type Author struct {
- Name string // The Authors name
- Email string // The Authors email
-}
-
-// String makes Author comply to the Stringer interface, to allow an easy print in the templating process
-func (a *Author) String() string {
- e := ""
- if a.Email != "" {
- e = " <" + a.Email + ">"
- }
-
- return fmt.Sprintf("%v%v", a.Name, e)
-}
-
-// HandleAction attempts to figure out which Action signature was used. If
-// it's an ActionFunc or a func with the legacy signature for Action, the func
-// is run!
-func HandleAction(action interface{}, context *Context) (err error) {
- switch a := action.(type) {
- case ActionFunc:
- return a(context)
- case func(*Context) error:
- return a(context)
- case func(*Context): // deprecated function signature
- a(context)
- return nil
- }
-
- return errInvalidActionType
-}
diff --git a/vendor/github.com/urfave/cli/v2/args.go b/vendor/github.com/urfave/cli/v2/args.go
deleted file mode 100644
index bd65c17bd..000000000
--- a/vendor/github.com/urfave/cli/v2/args.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package cli
-
-type Args interface {
- // Get returns the nth argument, or else a blank string
- Get(n int) string
- // First returns the first argument, or else a blank string
- First() string
- // Tail returns the rest of the arguments (not the first one)
- // or else an empty string slice
- Tail() []string
- // Len returns the length of the wrapped slice
- Len() int
- // Present checks if there are any arguments present
- Present() bool
- // Slice returns a copy of the internal slice
- Slice() []string
-}
-
-type args []string
-
-func (a *args) Get(n int) string {
- if len(*a) > n {
- return (*a)[n]
- }
- return ""
-}
-
-func (a *args) First() string {
- return a.Get(0)
-}
-
-func (a *args) Tail() []string {
- if a.Len() >= 2 {
- tail := []string((*a)[1:])
- ret := make([]string, len(tail))
- copy(ret, tail)
- return ret
- }
- return []string{}
-}
-
-func (a *args) Len() int {
- return len(*a)
-}
-
-func (a *args) Present() bool {
- return a.Len() != 0
-}
-
-func (a *args) Slice() []string {
- ret := make([]string, len(*a))
- copy(ret, *a)
- return ret
-}
diff --git a/vendor/github.com/urfave/cli/v2/category.go b/vendor/github.com/urfave/cli/v2/category.go
deleted file mode 100644
index 867e3908c..000000000
--- a/vendor/github.com/urfave/cli/v2/category.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package cli
-
-// CommandCategories interface allows for category manipulation
-type CommandCategories interface {
- // AddCommand adds a command to a category, creating a new category if necessary.
- AddCommand(category string, command *Command)
- // categories returns a copy of the category slice
- Categories() []CommandCategory
-}
-
-type commandCategories []*commandCategory
-
-func newCommandCategories() CommandCategories {
- ret := commandCategories([]*commandCategory{})
- return &ret
-}
-
-func (c *commandCategories) Less(i, j int) bool {
- return lexicographicLess((*c)[i].Name(), (*c)[j].Name())
-}
-
-func (c *commandCategories) Len() int {
- return len(*c)
-}
-
-func (c *commandCategories) Swap(i, j int) {
- (*c)[i], (*c)[j] = (*c)[j], (*c)[i]
-}
-
-func (c *commandCategories) AddCommand(category string, command *Command) {
- for _, commandCategory := range []*commandCategory(*c) {
- if commandCategory.name == category {
- commandCategory.commands = append(commandCategory.commands, command)
- return
- }
- }
- newVal := append(*c,
- &commandCategory{name: category, commands: []*Command{command}})
- *c = newVal
-}
-
-func (c *commandCategories) Categories() []CommandCategory {
- ret := make([]CommandCategory, len(*c))
- for i, cat := range *c {
- ret[i] = cat
- }
- return ret
-}
-
-// CommandCategory is a category containing commands.
-type CommandCategory interface {
- // Name returns the category name string
- Name() string
- // VisibleCommands returns a slice of the Commands with Hidden=false
- VisibleCommands() []*Command
-}
-
-type commandCategory struct {
- name string
- commands []*Command
-}
-
-func (c *commandCategory) Name() string {
- return c.name
-}
-
-func (c *commandCategory) VisibleCommands() []*Command {
- if c.commands == nil {
- c.commands = []*Command{}
- }
-
- var ret []*Command
- for _, command := range c.commands {
- if !command.Hidden {
- ret = append(ret, command)
- }
- }
- return ret
-}
diff --git a/vendor/github.com/urfave/cli/v2/cli.go b/vendor/github.com/urfave/cli/v2/cli.go
deleted file mode 100644
index 62a5bc22d..000000000
--- a/vendor/github.com/urfave/cli/v2/cli.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Package cli provides a minimal framework for creating and organizing command line
-// Go applications. cli is designed to be easy to understand and write, the most simple
-// cli application can be written as follows:
-// func main() {
-// (&cli.App{}).Run(os.Args)
-// }
-//
-// Of course this application does not do much, so let's make this an actual application:
-// func main() {
-// app := &cli.App{
-// Name: "greet",
-// Usage: "say a greeting",
-// Action: func(c *cli.Context) error {
-// fmt.Println("Greetings")
-// return nil
-// },
-// }
-//
-// app.Run(os.Args)
-// }
-package cli
-
-//go:generate go run flag-gen/main.go flag-gen/assets_vfsdata.go
diff --git a/vendor/github.com/urfave/cli/v2/command.go b/vendor/github.com/urfave/cli/v2/command.go
deleted file mode 100644
index dda2f49a0..000000000
--- a/vendor/github.com/urfave/cli/v2/command.go
+++ /dev/null
@@ -1,300 +0,0 @@
-package cli
-
-import (
- "flag"
- "fmt"
- "sort"
- "strings"
-)
-
-// Command is a subcommand for a cli.App.
-type Command struct {
- // The name of the command
- Name string
- // A list of aliases for the command
- Aliases []string
- // A short description of the usage of this command
- Usage string
- // Custom text to show on USAGE section of help
- UsageText string
- // A longer explanation of how the command works
- Description string
- // A short description of the arguments of this command
- ArgsUsage string
- // The category the command is part of
- Category string
- // The function to call when checking for bash command completions
- BashComplete BashCompleteFunc
- // An action to execute before any sub-subcommands are run, but after the context is ready
- // If a non-nil error is returned, no sub-subcommands are run
- Before BeforeFunc
- // An action to execute after any subcommands are run, but after the subcommand has finished
- // It is run even if Action() panics
- After AfterFunc
- // The function to call when this command is invoked
- Action ActionFunc
- // Execute this function if a usage error occurs.
- OnUsageError OnUsageErrorFunc
- // List of child commands
- Subcommands []*Command
- // List of flags to parse
- Flags []Flag
- // Treat all flags as normal arguments if true
- SkipFlagParsing bool
- // Boolean to hide built-in help command and help flag
- HideHelp bool
- // Boolean to hide built-in help command but keep help flag
- // Ignored if HideHelp is true.
- HideHelpCommand bool
- // Boolean to hide this command from help or completion
- Hidden bool
- // Boolean to enable short-option handling so user can combine several
- // single-character bool arguments into one
- // i.e. foobar -o -v -> foobar -ov
- UseShortOptionHandling bool
-
- // Full name of command for help, defaults to full command name, including parent commands.
- HelpName string
- commandNamePath []string
-
- // CustomHelpTemplate the text template for the command help topic.
- // cli.go uses text/template to render templates. You can
- // render custom help text by setting this variable.
- CustomHelpTemplate string
-}
-
-type Commands []*Command
-
-type CommandsByName []*Command
-
-func (c CommandsByName) Len() int {
- return len(c)
-}
-
-func (c CommandsByName) Less(i, j int) bool {
- return lexicographicLess(c[i].Name, c[j].Name)
-}
-
-func (c CommandsByName) Swap(i, j int) {
- c[i], c[j] = c[j], c[i]
-}
-
-// FullName returns the full name of the command.
-// For subcommands this ensures that parent commands are part of the command path
-func (c *Command) FullName() string {
- if c.commandNamePath == nil {
- return c.Name
- }
- return strings.Join(c.commandNamePath, " ")
-}
-
-// Run invokes the command given the context, parses ctx.Args() to generate command-specific flags
-func (c *Command) Run(ctx *Context) (err error) {
- if len(c.Subcommands) > 0 {
- return c.startApp(ctx)
- }
-
- if !c.HideHelp && HelpFlag != nil {
- // append help to flags
- c.appendFlag(HelpFlag)
- }
-
- if ctx.App.UseShortOptionHandling {
- c.UseShortOptionHandling = true
- }
-
- set, err := c.parseFlags(ctx.Args(), ctx.shellComplete)
-
- context := NewContext(ctx.App, set, ctx)
- context.Command = c
- if checkCommandCompletions(context, c.Name) {
- return nil
- }
-
- if err != nil {
- if c.OnUsageError != nil {
- err = c.OnUsageError(context, err, false)
- context.App.handleExitCoder(context, err)
- return err
- }
- _, _ = fmt.Fprintln(context.App.Writer, "Incorrect Usage:", err.Error())
- _, _ = fmt.Fprintln(context.App.Writer)
- _ = ShowCommandHelp(context, c.Name)
- return err
- }
-
- if checkCommandHelp(context, c.Name) {
- return nil
- }
-
- cerr := checkRequiredFlags(c.Flags, context)
- if cerr != nil {
- _ = ShowCommandHelp(context, c.Name)
- return cerr
- }
-
- if c.After != nil {
- defer func() {
- afterErr := c.After(context)
- if afterErr != nil {
- context.App.handleExitCoder(context, err)
- if err != nil {
- err = newMultiError(err, afterErr)
- } else {
- err = afterErr
- }
- }
- }()
- }
-
- if c.Before != nil {
- err = c.Before(context)
- if err != nil {
- context.App.handleExitCoder(context, err)
- return err
- }
- }
-
- if c.Action == nil {
- c.Action = helpSubcommand.Action
- }
-
- context.Command = c
- err = c.Action(context)
-
- if err != nil {
- context.App.handleExitCoder(context, err)
- }
- return err
-}
-
-func (c *Command) newFlagSet() (*flag.FlagSet, error) {
- return flagSet(c.Name, c.Flags)
-}
-
-func (c *Command) useShortOptionHandling() bool {
- return c.UseShortOptionHandling
-}
-
-func (c *Command) parseFlags(args Args, shellComplete bool) (*flag.FlagSet, error) {
- set, err := c.newFlagSet()
- if err != nil {
- return nil, err
- }
-
- if c.SkipFlagParsing {
- return set, set.Parse(append([]string{"--"}, args.Tail()...))
- }
-
- err = parseIter(set, c, args.Tail(), shellComplete)
- if err != nil {
- return nil, err
- }
-
- err = normalizeFlags(c.Flags, set)
- if err != nil {
- return nil, err
- }
-
- return set, nil
-}
-
-// Names returns the names including short names and aliases.
-func (c *Command) Names() []string {
- return append([]string{c.Name}, c.Aliases...)
-}
-
-// HasName returns true if Command.Name matches given name
-func (c *Command) HasName(name string) bool {
- for _, n := range c.Names() {
- if n == name {
- return true
- }
- }
- return false
-}
-
-func (c *Command) startApp(ctx *Context) error {
- app := &App{
- Metadata: ctx.App.Metadata,
- Name: fmt.Sprintf("%s %s", ctx.App.Name, c.Name),
- }
-
- if c.HelpName == "" {
- app.HelpName = c.HelpName
- } else {
- app.HelpName = app.Name
- }
-
- app.Usage = c.Usage
- app.Description = c.Description
- app.ArgsUsage = c.ArgsUsage
-
- // set CommandNotFound
- app.CommandNotFound = ctx.App.CommandNotFound
- app.CustomAppHelpTemplate = c.CustomHelpTemplate
-
- // set the flags and commands
- app.Commands = c.Subcommands
- app.Flags = c.Flags
- app.HideHelp = c.HideHelp
- app.HideHelpCommand = c.HideHelpCommand
-
- app.Version = ctx.App.Version
- app.HideVersion = true
- app.Compiled = ctx.App.Compiled
- app.Writer = ctx.App.Writer
- app.ErrWriter = ctx.App.ErrWriter
- app.ExitErrHandler = ctx.App.ExitErrHandler
- app.UseShortOptionHandling = ctx.App.UseShortOptionHandling
-
- app.categories = newCommandCategories()
- for _, command := range c.Subcommands {
- app.categories.AddCommand(command.Category, command)
- }
-
- sort.Sort(app.categories.(*commandCategories))
-
- // bash completion
- app.EnableBashCompletion = ctx.App.EnableBashCompletion
- if c.BashComplete != nil {
- app.BashComplete = c.BashComplete
- }
-
- // set the actions
- app.Before = c.Before
- app.After = c.After
- if c.Action != nil {
- app.Action = c.Action
- } else {
- app.Action = helpSubcommand.Action
- }
- app.OnUsageError = c.OnUsageError
-
- for index, cc := range app.Commands {
- app.Commands[index].commandNamePath = []string{c.Name, cc.Name}
- }
-
- return app.RunAsSubcommand(ctx)
-}
-
-// VisibleFlags returns a slice of the Flags with Hidden=false
-func (c *Command) VisibleFlags() []Flag {
- return visibleFlags(c.Flags)
-}
-
-func (c *Command) appendFlag(fl Flag) {
- if !hasFlag(c.Flags, fl) {
- c.Flags = append(c.Flags, fl)
- }
-}
-
-func hasCommand(commands []*Command, command *Command) bool {
- for _, existing := range commands {
- if command == existing {
- return true
- }
- }
-
- return false
-}
diff --git a/vendor/github.com/urfave/cli/v2/context.go b/vendor/github.com/urfave/cli/v2/context.go
deleted file mode 100644
index 74ed51912..000000000
--- a/vendor/github.com/urfave/cli/v2/context.go
+++ /dev/null
@@ -1,273 +0,0 @@
-package cli
-
-import (
- "context"
- "errors"
- "flag"
- "fmt"
- "strings"
-)
-
-// Context is a type that is passed through to
-// each Handler action in a cli application. Context
-// can be used to retrieve context-specific args and
-// parsed command-line options.
-type Context struct {
- context.Context
- App *App
- Command *Command
- shellComplete bool
- flagSet *flag.FlagSet
- parentContext *Context
-}
-
-// NewContext creates a new context. For use in when invoking an App or Command action.
-func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context {
- c := &Context{App: app, flagSet: set, parentContext: parentCtx}
- if parentCtx != nil {
- c.Context = parentCtx.Context
- c.shellComplete = parentCtx.shellComplete
- if parentCtx.flagSet == nil {
- parentCtx.flagSet = &flag.FlagSet{}
- }
- }
-
- c.Command = &Command{}
-
- if c.Context == nil {
- c.Context = context.Background()
- }
-
- return c
-}
-
-// NumFlags returns the number of flags set
-func (c *Context) NumFlags() int {
- return c.flagSet.NFlag()
-}
-
-// Set sets a context flag to a value.
-func (c *Context) Set(name, value string) error {
- return c.flagSet.Set(name, value)
-}
-
-// IsSet determines if the flag was actually set
-func (c *Context) IsSet(name string) bool {
- if fs := lookupFlagSet(name, c); fs != nil {
- if fs := lookupFlagSet(name, c); fs != nil {
- isSet := false
- fs.Visit(func(f *flag.Flag) {
- if f.Name == name {
- isSet = true
- }
- })
- if isSet {
- return true
- }
- }
-
- f := lookupFlag(name, c)
- if f == nil {
- return false
- }
-
- return f.IsSet()
- }
-
- return false
-}
-
-// LocalFlagNames returns a slice of flag names used in this context.
-func (c *Context) LocalFlagNames() []string {
- var names []string
- c.flagSet.Visit(makeFlagNameVisitor(&names))
- return names
-}
-
-// FlagNames returns a slice of flag names used by the this context and all of
-// its parent contexts.
-func (c *Context) FlagNames() []string {
- var names []string
- for _, ctx := range c.Lineage() {
- ctx.flagSet.Visit(makeFlagNameVisitor(&names))
- }
- return names
-}
-
-// Lineage returns *this* context and all of its ancestor contexts in order from
-// child to parent
-func (c *Context) Lineage() []*Context {
- var lineage []*Context
-
- for cur := c; cur != nil; cur = cur.parentContext {
- lineage = append(lineage, cur)
- }
-
- return lineage
-}
-
-// Value returns the value of the flag corresponding to `name`
-func (c *Context) Value(name string) interface{} {
- return c.flagSet.Lookup(name).Value.(flag.Getter).Get()
-}
-
-// Args returns the command line arguments associated with the context.
-func (c *Context) Args() Args {
- ret := args(c.flagSet.Args())
- return &ret
-}
-
-// NArg returns the number of the command line arguments.
-func (c *Context) NArg() int {
- return c.Args().Len()
-}
-
-func lookupFlag(name string, ctx *Context) Flag {
- for _, c := range ctx.Lineage() {
- if c.Command == nil {
- continue
- }
-
- for _, f := range c.Command.Flags {
- for _, n := range f.Names() {
- if n == name {
- return f
- }
- }
- }
- }
-
- if ctx.App != nil {
- for _, f := range ctx.App.Flags {
- for _, n := range f.Names() {
- if n == name {
- return f
- }
- }
- }
- }
-
- return nil
-}
-
-func lookupFlagSet(name string, ctx *Context) *flag.FlagSet {
- for _, c := range ctx.Lineage() {
- if f := c.flagSet.Lookup(name); f != nil {
- return c.flagSet
- }
- }
-
- return nil
-}
-
-func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) {
- switch ff.Value.(type) {
- case Serializer:
- _ = set.Set(name, ff.Value.(Serializer).Serialize())
- default:
- _ = set.Set(name, ff.Value.String())
- }
-}
-
-func normalizeFlags(flags []Flag, set *flag.FlagSet) error {
- visited := make(map[string]bool)
- set.Visit(func(f *flag.Flag) {
- visited[f.Name] = true
- })
- for _, f := range flags {
- parts := f.Names()
- if len(parts) == 1 {
- continue
- }
- var ff *flag.Flag
- for _, name := range parts {
- name = strings.Trim(name, " ")
- if visited[name] {
- if ff != nil {
- return errors.New("Cannot use two forms of the same flag: " + name + " " + ff.Name)
- }
- ff = set.Lookup(name)
- }
- }
- if ff == nil {
- continue
- }
- for _, name := range parts {
- name = strings.Trim(name, " ")
- if !visited[name] {
- copyFlag(name, ff, set)
- }
- }
- }
- return nil
-}
-
-func makeFlagNameVisitor(names *[]string) func(*flag.Flag) {
- return func(f *flag.Flag) {
- nameParts := strings.Split(f.Name, ",")
- name := strings.TrimSpace(nameParts[0])
-
- for _, part := range nameParts {
- part = strings.TrimSpace(part)
- if len(part) > len(name) {
- name = part
- }
- }
-
- if name != "" {
- *names = append(*names, name)
- }
- }
-}
-
-type requiredFlagsErr interface {
- error
- getMissingFlags() []string
-}
-
-type errRequiredFlags struct {
- missingFlags []string
-}
-
-func (e *errRequiredFlags) Error() string {
- numberOfMissingFlags := len(e.missingFlags)
- if numberOfMissingFlags == 1 {
- return fmt.Sprintf("Required flag %q not set", e.missingFlags[0])
- }
- joinedMissingFlags := strings.Join(e.missingFlags, ", ")
- return fmt.Sprintf("Required flags %q not set", joinedMissingFlags)
-}
-
-func (e *errRequiredFlags) getMissingFlags() []string {
- return e.missingFlags
-}
-
-func checkRequiredFlags(flags []Flag, context *Context) requiredFlagsErr {
- var missingFlags []string
- for _, f := range flags {
- if rf, ok := f.(RequiredFlag); ok && rf.IsRequired() {
- var flagPresent bool
- var flagName string
-
- for _, key := range f.Names() {
- if len(key) > 1 {
- flagName = key
- }
-
- if context.IsSet(strings.TrimSpace(key)) {
- flagPresent = true
- }
- }
-
- if !flagPresent && flagName != "" {
- missingFlags = append(missingFlags, flagName)
- }
- }
- }
-
- if len(missingFlags) != 0 {
- return &errRequiredFlags{missingFlags: missingFlags}
- }
-
- return nil
-}
diff --git a/vendor/github.com/urfave/cli/v2/docs.go b/vendor/github.com/urfave/cli/v2/docs.go
deleted file mode 100644
index dc16fc82d..000000000
--- a/vendor/github.com/urfave/cli/v2/docs.go
+++ /dev/null
@@ -1,148 +0,0 @@
-package cli
-
-import (
- "bytes"
- "fmt"
- "io"
- "sort"
- "strings"
- "text/template"
-
- "github.com/cpuguy83/go-md2man/v2/md2man"
-)
-
-// ToMarkdown creates a markdown string for the `*App`
-// The function errors if either parsing or writing of the string fails.
-func (a *App) ToMarkdown() (string, error) {
- var w bytes.Buffer
- if err := a.writeDocTemplate(&w); err != nil {
- return "", err
- }
- return w.String(), nil
-}
-
-// ToMan creates a man page string for the `*App`
-// The function errors if either parsing or writing of the string fails.
-func (a *App) ToMan() (string, error) {
- var w bytes.Buffer
- if err := a.writeDocTemplate(&w); err != nil {
- return "", err
- }
- man := md2man.Render(w.Bytes())
- return string(man), nil
-}
-
-type cliTemplate struct {
- App *App
- Commands []string
- GlobalArgs []string
- SynopsisArgs []string
-}
-
-func (a *App) writeDocTemplate(w io.Writer) error {
- const name = "cli"
- t, err := template.New(name).Parse(MarkdownDocTemplate)
- if err != nil {
- return err
- }
- return t.ExecuteTemplate(w, name, &cliTemplate{
- App: a,
- Commands: prepareCommands(a.Commands, 0),
- GlobalArgs: prepareArgsWithValues(a.VisibleFlags()),
- SynopsisArgs: prepareArgsSynopsis(a.VisibleFlags()),
- })
-}
-
-func prepareCommands(commands []*Command, level int) []string {
- var coms []string
- for _, command := range commands {
- if command.Hidden {
- continue
- }
- usage := ""
- if command.Usage != "" {
- usage = command.Usage
- }
-
- prepared := fmt.Sprintf("%s %s\n\n%s\n",
- strings.Repeat("#", level+2),
- strings.Join(command.Names(), ", "),
- usage,
- )
-
- flags := prepareArgsWithValues(command.Flags)
- if len(flags) > 0 {
- prepared += fmt.Sprintf("\n%s", strings.Join(flags, "\n"))
- }
-
- coms = append(coms, prepared)
-
- // recursevly iterate subcommands
- if len(command.Subcommands) > 0 {
- coms = append(
- coms,
- prepareCommands(command.Subcommands, level+1)...,
- )
- }
- }
-
- return coms
-}
-
-func prepareArgsWithValues(flags []Flag) []string {
- return prepareFlags(flags, ", ", "**", "**", `""`, true)
-}
-
-func prepareArgsSynopsis(flags []Flag) []string {
- return prepareFlags(flags, "|", "[", "]", "[value]", false)
-}
-
-func prepareFlags(
- flags []Flag,
- sep, opener, closer, value string,
- addDetails bool,
-) []string {
- args := []string{}
- for _, f := range flags {
- flag, ok := f.(DocGenerationFlag)
- if !ok {
- continue
- }
- modifiedArg := opener
-
- for _, s := range flag.Names() {
- trimmed := strings.TrimSpace(s)
- if len(modifiedArg) > len(opener) {
- modifiedArg += sep
- }
- if len(trimmed) > 1 {
- modifiedArg += fmt.Sprintf("--%s", trimmed)
- } else {
- modifiedArg += fmt.Sprintf("-%s", trimmed)
- }
- }
- modifiedArg += closer
- if flag.TakesValue() {
- modifiedArg += fmt.Sprintf("=%s", value)
- }
-
- if addDetails {
- modifiedArg += flagDetails(flag)
- }
-
- args = append(args, modifiedArg+"\n")
-
- }
- sort.Strings(args)
- return args
-}
-
-// flagDetails returns a string containing the flags metadata
-func flagDetails(flag DocGenerationFlag) string {
- description := flag.GetUsage()
- value := flag.GetValue()
- if value != "" {
- description += " (default: " + value + ")"
- }
- return ": " + description
-}
diff --git a/vendor/github.com/urfave/cli/v2/errors.go b/vendor/github.com/urfave/cli/v2/errors.go
deleted file mode 100644
index 751ef9b16..000000000
--- a/vendor/github.com/urfave/cli/v2/errors.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package cli
-
-import (
- "fmt"
- "io"
- "os"
- "strings"
-)
-
-// OsExiter is the function used when the app exits. If not set defaults to os.Exit.
-var OsExiter = os.Exit
-
-// ErrWriter is used to write errors to the user. This can be anything
-// implementing the io.Writer interface and defaults to os.Stderr.
-var ErrWriter io.Writer = os.Stderr
-
-// MultiError is an error that wraps multiple errors.
-type MultiError interface {
- error
- Errors() []error
-}
-
-// newMultiError creates a new MultiError. Pass in one or more errors.
-func newMultiError(err ...error) MultiError {
- ret := multiError(err)
- return &ret
-}
-
-type multiError []error
-
-// Error implements the error interface.
-func (m *multiError) Error() string {
- errs := make([]string, len(*m))
- for i, err := range *m {
- errs[i] = err.Error()
- }
-
- return strings.Join(errs, "\n")
-}
-
-// Errors returns a copy of the errors slice
-func (m *multiError) Errors() []error {
- errs := make([]error, len(*m))
- for _, err := range *m {
- errs = append(errs, err)
- }
- return errs
-}
-
-// ErrorFormatter is the interface that will suitably format the error output
-type ErrorFormatter interface {
- Format(s fmt.State, verb rune)
-}
-
-// ExitCoder is the interface checked by `App` and `Command` for a custom exit
-// code
-type ExitCoder interface {
- error
- ExitCode() int
-}
-
-type exitError struct {
- exitCode int
- message interface{}
-}
-
-// NewExitError calls Exit to create a new ExitCoder.
-//
-// Deprecated: This function is a duplicate of Exit and will eventually be removed.
-func NewExitError(message interface{}, exitCode int) ExitCoder {
- return Exit(message, exitCode)
-}
-
-// Exit wraps a message and exit code into an error, which by default is
-// handled with a call to os.Exit during default error handling.
-//
-// This is the simplest way to trigger a non-zero exit code for an App without
-// having to call os.Exit manually. During testing, this behavior can be avoided
-// by overiding the ExitErrHandler function on an App or the package-global
-// OsExiter function.
-func Exit(message interface{}, exitCode int) ExitCoder {
- return &exitError{
- message: message,
- exitCode: exitCode,
- }
-}
-
-func (ee *exitError) Error() string {
- return fmt.Sprintf("%v", ee.message)
-}
-
-func (ee *exitError) ExitCode() int {
- return ee.exitCode
-}
-
-// HandleExitCoder handles errors implementing ExitCoder by printing their
-// message and calling OsExiter with the given exit code.
-//
-// If the given error instead implements MultiError, each error will be checked
-// for the ExitCoder interface, and OsExiter will be called with the last exit
-// code found, or exit code 1 if no ExitCoder is found.
-//
-// This function is the default error-handling behavior for an App.
-func HandleExitCoder(err error) {
- if err == nil {
- return
- }
-
- if exitErr, ok := err.(ExitCoder); ok {
- if err.Error() != "" {
- if _, ok := exitErr.(ErrorFormatter); ok {
- _, _ = fmt.Fprintf(ErrWriter, "%+v\n", err)
- } else {
- _, _ = fmt.Fprintln(ErrWriter, err)
- }
- }
- OsExiter(exitErr.ExitCode())
- return
- }
-
- if multiErr, ok := err.(MultiError); ok {
- code := handleMultiError(multiErr)
- OsExiter(code)
- return
- }
-}
-
-func handleMultiError(multiErr MultiError) int {
- code := 1
- for _, merr := range multiErr.Errors() {
- if multiErr2, ok := merr.(MultiError); ok {
- code = handleMultiError(multiErr2)
- } else if merr != nil {
- fmt.Fprintln(ErrWriter, merr)
- if exitErr, ok := merr.(ExitCoder); ok {
- code = exitErr.ExitCode()
- }
- }
- }
- return code
-}
diff --git a/vendor/github.com/urfave/cli/v2/fish.go b/vendor/github.com/urfave/cli/v2/fish.go
deleted file mode 100644
index 588e070ea..000000000
--- a/vendor/github.com/urfave/cli/v2/fish.go
+++ /dev/null
@@ -1,196 +0,0 @@
-package cli
-
-import (
- "bytes"
- "fmt"
- "io"
- "strings"
- "text/template"
-)
-
-// ToFishCompletion creates a fish completion string for the `*App`
-// The function errors if either parsing or writing of the string fails.
-func (a *App) ToFishCompletion() (string, error) {
- var w bytes.Buffer
- if err := a.writeFishCompletionTemplate(&w); err != nil {
- return "", err
- }
- return w.String(), nil
-}
-
-type fishCompletionTemplate struct {
- App *App
- Completions []string
- AllCommands []string
-}
-
-func (a *App) writeFishCompletionTemplate(w io.Writer) error {
- const name = "cli"
- t, err := template.New(name).Parse(FishCompletionTemplate)
- if err != nil {
- return err
- }
- allCommands := []string{}
-
- // Add global flags
- completions := a.prepareFishFlags(a.VisibleFlags(), allCommands)
-
- // Add help flag
- if !a.HideHelp {
- completions = append(
- completions,
- a.prepareFishFlags([]Flag{HelpFlag}, allCommands)...,
- )
- }
-
- // Add version flag
- if !a.HideVersion {
- completions = append(
- completions,
- a.prepareFishFlags([]Flag{VersionFlag}, allCommands)...,
- )
- }
-
- // Add commands and their flags
- completions = append(
- completions,
- a.prepareFishCommands(a.VisibleCommands(), &allCommands, []string{})...,
- )
-
- return t.ExecuteTemplate(w, name, &fishCompletionTemplate{
- App: a,
- Completions: completions,
- AllCommands: allCommands,
- })
-}
-
-func (a *App) prepareFishCommands(commands []*Command, allCommands *[]string, previousCommands []string) []string {
- completions := []string{}
- for _, command := range commands {
- if command.Hidden {
- continue
- }
-
- var completion strings.Builder
- completion.WriteString(fmt.Sprintf(
- "complete -r -c %s -n '%s' -a '%s'",
- a.Name,
- a.fishSubcommandHelper(previousCommands),
- strings.Join(command.Names(), " "),
- ))
-
- if command.Usage != "" {
- completion.WriteString(fmt.Sprintf(" -d '%s'",
- escapeSingleQuotes(command.Usage)))
- }
-
- if !command.HideHelp {
- completions = append(
- completions,
- a.prepareFishFlags([]Flag{HelpFlag}, command.Names())...,
- )
- }
-
- *allCommands = append(*allCommands, command.Names()...)
- completions = append(completions, completion.String())
- completions = append(
- completions,
- a.prepareFishFlags(command.Flags, command.Names())...,
- )
-
- // recursevly iterate subcommands
- if len(command.Subcommands) > 0 {
- completions = append(
- completions,
- a.prepareFishCommands(
- command.Subcommands, allCommands, command.Names(),
- )...,
- )
- }
- }
-
- return completions
-}
-
-func (a *App) prepareFishFlags(flags []Flag, previousCommands []string) []string {
- completions := []string{}
- for _, f := range flags {
- flag, ok := f.(DocGenerationFlag)
- if !ok {
- continue
- }
-
- completion := &strings.Builder{}
- completion.WriteString(fmt.Sprintf(
- "complete -c %s -n '%s'",
- a.Name,
- a.fishSubcommandHelper(previousCommands),
- ))
-
- fishAddFileFlag(f, completion)
-
- for idx, opt := range flag.Names() {
- if idx == 0 {
- completion.WriteString(fmt.Sprintf(
- " -l %s", strings.TrimSpace(opt),
- ))
- } else {
- completion.WriteString(fmt.Sprintf(
- " -s %s", strings.TrimSpace(opt),
- ))
-
- }
- }
-
- if flag.TakesValue() {
- completion.WriteString(" -r")
- }
-
- if flag.GetUsage() != "" {
- completion.WriteString(fmt.Sprintf(" -d '%s'",
- escapeSingleQuotes(flag.GetUsage())))
- }
-
- completions = append(completions, completion.String())
- }
-
- return completions
-}
-
-func fishAddFileFlag(flag Flag, completion *strings.Builder) {
- switch f := flag.(type) {
- case *GenericFlag:
- if f.TakesFile {
- return
- }
- case *StringFlag:
- if f.TakesFile {
- return
- }
- case *StringSliceFlag:
- if f.TakesFile {
- return
- }
- case *PathFlag:
- if f.TakesFile {
- return
- }
- }
- completion.WriteString(" -f")
-}
-
-func (a *App) fishSubcommandHelper(allCommands []string) string {
- fishHelper := fmt.Sprintf("__fish_%s_no_subcommand", a.Name)
- if len(allCommands) > 0 {
- fishHelper = fmt.Sprintf(
- "__fish_seen_subcommand_from %s",
- strings.Join(allCommands, " "),
- )
- }
- return fishHelper
-
-}
-
-func escapeSingleQuotes(input string) string {
- return strings.Replace(input, `'`, `\'`, -1)
-}
diff --git a/vendor/github.com/urfave/cli/v2/flag.go b/vendor/github.com/urfave/cli/v2/flag.go
deleted file mode 100644
index ad97c2d05..000000000
--- a/vendor/github.com/urfave/cli/v2/flag.go
+++ /dev/null
@@ -1,388 +0,0 @@
-package cli
-
-import (
- "flag"
- "fmt"
- "io/ioutil"
- "reflect"
- "regexp"
- "runtime"
- "strconv"
- "strings"
- "syscall"
- "time"
-)
-
-const defaultPlaceholder = "value"
-
-var (
- slPfx = fmt.Sprintf("sl:::%d:::", time.Now().UTC().UnixNano())
-
- commaWhitespace = regexp.MustCompile("[, ]+.*")
-)
-
-// BashCompletionFlag enables bash-completion for all commands and subcommands
-var BashCompletionFlag Flag = &BoolFlag{
- Name: "generate-bash-completion",
- Hidden: true,
-}
-
-// VersionFlag prints the version for the application
-var VersionFlag Flag = &BoolFlag{
- Name: "version",
- Aliases: []string{"v"},
- Usage: "print the version",
-}
-
-// HelpFlag prints the help for all commands and subcommands.
-// Set to nil to disable the flag. The subcommand
-// will still be added unless HideHelp or HideHelpCommand is set to true.
-var HelpFlag Flag = &BoolFlag{
- Name: "help",
- Aliases: []string{"h"},
- Usage: "show help",
-}
-
-// FlagStringer converts a flag definition to a string. This is used by help
-// to display a flag.
-var FlagStringer FlagStringFunc = stringifyFlag
-
-// Serializer is used to circumvent the limitations of flag.FlagSet.Set
-type Serializer interface {
- Serialize() string
-}
-
-// FlagNamePrefixer converts a full flag name and its placeholder into the help
-// message flag prefix. This is used by the default FlagStringer.
-var FlagNamePrefixer FlagNamePrefixFunc = prefixedNames
-
-// FlagEnvHinter annotates flag help message with the environment variable
-// details. This is used by the default FlagStringer.
-var FlagEnvHinter FlagEnvHintFunc = withEnvHint
-
-// FlagFileHinter annotates flag help message with the environment variable
-// details. This is used by the default FlagStringer.
-var FlagFileHinter FlagFileHintFunc = withFileHint
-
-// FlagsByName is a slice of Flag.
-type FlagsByName []Flag
-
-func (f FlagsByName) Len() int {
- return len(f)
-}
-
-func (f FlagsByName) Less(i, j int) bool {
- if len(f[j].Names()) == 0 {
- return false
- } else if len(f[i].Names()) == 0 {
- return true
- }
- return lexicographicLess(f[i].Names()[0], f[j].Names()[0])
-}
-
-func (f FlagsByName) Swap(i, j int) {
- f[i], f[j] = f[j], f[i]
-}
-
-// Flag is a common interface related to parsing flags in cli.
-// For more advanced flag parsing techniques, it is recommended that
-// this interface be implemented.
-type Flag interface {
- fmt.Stringer
- // Apply Flag settings to the given flag set
- Apply(*flag.FlagSet) error
- Names() []string
- IsSet() bool
-}
-
-// RequiredFlag is an interface that allows us to mark flags as required
-// it allows flags required flags to be backwards compatible with the Flag interface
-type RequiredFlag interface {
- Flag
-
- IsRequired() bool
-}
-
-// DocGenerationFlag is an interface that allows documentation generation for the flag
-type DocGenerationFlag interface {
- Flag
-
- // TakesValue returns true if the flag takes a value, otherwise false
- TakesValue() bool
-
- // GetUsage returns the usage string for the flag
- GetUsage() string
-
- // GetValue returns the flags value as string representation and an empty
- // string if the flag takes no value at all.
- GetValue() string
-}
-
-func flagSet(name string, flags []Flag) (*flag.FlagSet, error) {
- set := flag.NewFlagSet(name, flag.ContinueOnError)
-
- for _, f := range flags {
- if err := f.Apply(set); err != nil {
- return nil, err
- }
- }
- set.SetOutput(ioutil.Discard)
- return set, nil
-}
-
-func visibleFlags(fl []Flag) []Flag {
- var visible []Flag
- for _, f := range fl {
- field := flagValue(f).FieldByName("Hidden")
- if !field.IsValid() || !field.Bool() {
- visible = append(visible, f)
- }
- }
- return visible
-}
-
-func prefixFor(name string) (prefix string) {
- if len(name) == 1 {
- prefix = "-"
- } else {
- prefix = "--"
- }
-
- return
-}
-
-// Returns the placeholder, if any, and the unquoted usage string.
-func unquoteUsage(usage string) (string, string) {
- for i := 0; i < len(usage); i++ {
- if usage[i] == '`' {
- for j := i + 1; j < len(usage); j++ {
- if usage[j] == '`' {
- name := usage[i+1 : j]
- usage = usage[:i] + name + usage[j+1:]
- return name, usage
- }
- }
- break
- }
- }
- return "", usage
-}
-
-func prefixedNames(names []string, placeholder string) string {
- var prefixed string
- for i, name := range names {
- if name == "" {
- continue
- }
-
- prefixed += prefixFor(name) + name
- if placeholder != "" {
- prefixed += " " + placeholder
- }
- if i < len(names)-1 {
- prefixed += ", "
- }
- }
- return prefixed
-}
-
-func withEnvHint(envVars []string, str string) string {
- envText := ""
- if envVars != nil && len(envVars) > 0 {
- prefix := "$"
- suffix := ""
- sep := ", $"
- if runtime.GOOS == "windows" {
- prefix = "%"
- suffix = "%"
- sep = "%, %"
- }
-
- envText = fmt.Sprintf(" [%s%s%s]", prefix, strings.Join(envVars, sep), suffix)
- }
- return str + envText
-}
-
-func flagNames(name string, aliases []string) []string {
- var ret []string
-
- for _, part := range append([]string{name}, aliases...) {
- // v1 -> v2 migration warning zone:
- // Strip off anything after the first found comma or space, which
- // *hopefully* makes it a tiny bit more obvious that unexpected behavior is
- // caused by using the v1 form of stringly typed "Name".
- ret = append(ret, commaWhitespace.ReplaceAllString(part, ""))
- }
-
- return ret
-}
-
-func flagStringSliceField(f Flag, name string) []string {
- fv := flagValue(f)
- field := fv.FieldByName(name)
-
- if field.IsValid() {
- return field.Interface().([]string)
- }
-
- return []string{}
-}
-
-func withFileHint(filePath, str string) string {
- fileText := ""
- if filePath != "" {
- fileText = fmt.Sprintf(" [%s]", filePath)
- }
- return str + fileText
-}
-
-func flagValue(f Flag) reflect.Value {
- fv := reflect.ValueOf(f)
- for fv.Kind() == reflect.Ptr {
- fv = reflect.Indirect(fv)
- }
- return fv
-}
-
-func formatDefault(format string) string {
- return " (default: " + format + ")"
-}
-
-func stringifyFlag(f Flag) string {
- fv := flagValue(f)
-
- switch f := f.(type) {
- case *IntSliceFlag:
- return withEnvHint(flagStringSliceField(f, "EnvVars"),
- stringifyIntSliceFlag(f))
- case *Int64SliceFlag:
- return withEnvHint(flagStringSliceField(f, "EnvVars"),
- stringifyInt64SliceFlag(f))
- case *Float64SliceFlag:
- return withEnvHint(flagStringSliceField(f, "EnvVars"),
- stringifyFloat64SliceFlag(f))
- case *StringSliceFlag:
- return withEnvHint(flagStringSliceField(f, "EnvVars"),
- stringifyStringSliceFlag(f))
- }
-
- placeholder, usage := unquoteUsage(fv.FieldByName("Usage").String())
-
- needsPlaceholder := false
- defaultValueString := ""
- val := fv.FieldByName("Value")
- if val.IsValid() {
- needsPlaceholder = val.Kind() != reflect.Bool
- defaultValueString = fmt.Sprintf(formatDefault("%v"), val.Interface())
-
- if val.Kind() == reflect.String && val.String() != "" {
- defaultValueString = fmt.Sprintf(formatDefault("%q"), val.String())
- }
- }
-
- helpText := fv.FieldByName("DefaultText")
- if helpText.IsValid() && helpText.String() != "" {
- needsPlaceholder = val.Kind() != reflect.Bool
- defaultValueString = fmt.Sprintf(formatDefault("%s"), helpText.String())
- }
-
- if defaultValueString == formatDefault("") {
- defaultValueString = ""
- }
-
- if needsPlaceholder && placeholder == "" {
- placeholder = defaultPlaceholder
- }
-
- usageWithDefault := strings.TrimSpace(usage + defaultValueString)
-
- return withEnvHint(flagStringSliceField(f, "EnvVars"),
- fmt.Sprintf("%s\t%s", prefixedNames(f.Names(), placeholder), usageWithDefault))
-}
-
-func stringifyIntSliceFlag(f *IntSliceFlag) string {
- var defaultVals []string
- if f.Value != nil && len(f.Value.Value()) > 0 {
- for _, i := range f.Value.Value() {
- defaultVals = append(defaultVals, strconv.Itoa(i))
- }
- }
-
- return stringifySliceFlag(f.Usage, f.Names(), defaultVals)
-}
-
-func stringifyInt64SliceFlag(f *Int64SliceFlag) string {
- var defaultVals []string
- if f.Value != nil && len(f.Value.Value()) > 0 {
- for _, i := range f.Value.Value() {
- defaultVals = append(defaultVals, strconv.FormatInt(i, 10))
- }
- }
-
- return stringifySliceFlag(f.Usage, f.Names(), defaultVals)
-}
-
-func stringifyFloat64SliceFlag(f *Float64SliceFlag) string {
- var defaultVals []string
-
- if f.Value != nil && len(f.Value.Value()) > 0 {
- for _, i := range f.Value.Value() {
- defaultVals = append(defaultVals, strings.TrimRight(strings.TrimRight(fmt.Sprintf("%f", i), "0"), "."))
- }
- }
-
- return stringifySliceFlag(f.Usage, f.Names(), defaultVals)
-}
-
-func stringifyStringSliceFlag(f *StringSliceFlag) string {
- var defaultVals []string
- if f.Value != nil && len(f.Value.Value()) > 0 {
- for _, s := range f.Value.Value() {
- if len(s) > 0 {
- defaultVals = append(defaultVals, strconv.Quote(s))
- }
- }
- }
-
- return stringifySliceFlag(f.Usage, f.Names(), defaultVals)
-}
-
-func stringifySliceFlag(usage string, names, defaultVals []string) string {
- placeholder, usage := unquoteUsage(usage)
- if placeholder == "" {
- placeholder = defaultPlaceholder
- }
-
- defaultVal := ""
- if len(defaultVals) > 0 {
- defaultVal = fmt.Sprintf(formatDefault("%s"), strings.Join(defaultVals, ", "))
- }
-
- usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultVal))
- return fmt.Sprintf("%s\t%s", prefixedNames(names, placeholder), usageWithDefault)
-}
-
-func hasFlag(flags []Flag, fl Flag) bool {
- for _, existing := range flags {
- if fl == existing {
- return true
- }
- }
-
- return false
-}
-
-func flagFromEnvOrFile(envVars []string, filePath string) (val string, ok bool) {
- for _, envVar := range envVars {
- envVar = strings.TrimSpace(envVar)
- if val, ok := syscall.Getenv(envVar); ok {
- return val, true
- }
- }
- for _, fileVar := range strings.Split(filePath, ",") {
- if data, err := ioutil.ReadFile(fileVar); err == nil {
- return string(data), true
- }
- }
- return "", false
-}
diff --git a/vendor/github.com/urfave/cli/v2/flag_bool.go b/vendor/github.com/urfave/cli/v2/flag_bool.go
deleted file mode 100644
index bc9ea35d0..000000000
--- a/vendor/github.com/urfave/cli/v2/flag_bool.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package cli
-
-import (
- "flag"
- "fmt"
- "strconv"
-)
-
-// BoolFlag is a flag with type bool
-type BoolFlag struct {
- Name string
- Aliases []string
- Usage string
- EnvVars []string
- FilePath string
- Required bool
- Hidden bool
- Value bool
- DefaultText string
- Destination *bool
- HasBeenSet bool
-}
-
-// IsSet returns whether or not the flag has been set through env or file
-func (f *BoolFlag) IsSet() bool {
- return f.HasBeenSet
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f *BoolFlag) String() string {
- return FlagStringer(f)
-}
-
-// Names returns the names of the flag
-func (f *BoolFlag) Names() []string {
- return flagNames(f.Name, f.Aliases)
-}
-
-// IsRequired returns whether or not the flag is required
-func (f *BoolFlag) IsRequired() bool {
- return f.Required
-}
-
-// TakesValue returns true of the flag takes a value, otherwise false
-func (f *BoolFlag) TakesValue() bool {
- return false
-}
-
-// GetUsage returns the usage string for the flag
-func (f *BoolFlag) GetUsage() string {
- return f.Usage
-}
-
-// GetValue returns the flags value as string representation and an empty
-// string if the flag takes no value at all.
-func (f *BoolFlag) GetValue() string {
- return ""
-}
-
-// Apply populates the flag given the flag set and environment
-func (f *BoolFlag) Apply(set *flag.FlagSet) error {
- if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {
- if val != "" {
- valBool, err := strconv.ParseBool(val)
-
- if err != nil {
- return fmt.Errorf("could not parse %q as bool value for flag %s: %s", val, f.Name, err)
- }
-
- f.Value = valBool
- f.HasBeenSet = true
- }
- }
-
- for _, name := range f.Names() {
- if f.Destination != nil {
- set.BoolVar(f.Destination, name, f.Value, f.Usage)
- continue
- }
- set.Bool(name, f.Value, f.Usage)
- }
-
- return nil
-}
-
-// Bool looks up the value of a local BoolFlag, returns
-// false if not found
-func (c *Context) Bool(name string) bool {
- if fs := lookupFlagSet(name, c); fs != nil {
- return lookupBool(name, fs)
- }
- return false
-}
-
-func lookupBool(name string, set *flag.FlagSet) bool {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := strconv.ParseBool(f.Value.String())
- if err != nil {
- return false
- }
- return parsed
- }
- return false
-}
diff --git a/vendor/github.com/urfave/cli/v2/flag_duration.go b/vendor/github.com/urfave/cli/v2/flag_duration.go
deleted file mode 100644
index 22a2e6720..000000000
--- a/vendor/github.com/urfave/cli/v2/flag_duration.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package cli
-
-import (
- "flag"
- "fmt"
- "time"
-)
-
-// DurationFlag is a flag with type time.Duration (see https://golang.org/pkg/time/#ParseDuration)
-type DurationFlag struct {
- Name string
- Aliases []string
- Usage string
- EnvVars []string
- FilePath string
- Required bool
- Hidden bool
- Value time.Duration
- DefaultText string
- Destination *time.Duration
- HasBeenSet bool
-}
-
-// IsSet returns whether or not the flag has been set through env or file
-func (f *DurationFlag) IsSet() bool {
- return f.HasBeenSet
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f *DurationFlag) String() string {
- return FlagStringer(f)
-}
-
-// Names returns the names of the flag
-func (f *DurationFlag) Names() []string {
- return flagNames(f.Name, f.Aliases)
-}
-
-// IsRequired returns whether or not the flag is required
-func (f *DurationFlag) IsRequired() bool {
- return f.Required
-}
-
-// TakesValue returns true of the flag takes a value, otherwise false
-func (f *DurationFlag) TakesValue() bool {
- return true
-}
-
-// GetUsage returns the usage string for the flag
-func (f *DurationFlag) GetUsage() string {
- return f.Usage
-}
-
-// GetValue returns the flags value as string representation and an empty
-// string if the flag takes no value at all.
-func (f *DurationFlag) GetValue() string {
- return f.Value.String()
-}
-
-// Apply populates the flag given the flag set and environment
-func (f *DurationFlag) Apply(set *flag.FlagSet) error {
- if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {
- if val != "" {
- valDuration, err := time.ParseDuration(val)
-
- if err != nil {
- return fmt.Errorf("could not parse %q as duration value for flag %s: %s", val, f.Name, err)
- }
-
- f.Value = valDuration
- f.HasBeenSet = true
- }
- }
-
- for _, name := range f.Names() {
- if f.Destination != nil {
- set.DurationVar(f.Destination, name, f.Value, f.Usage)
- continue
- }
- set.Duration(name, f.Value, f.Usage)
- }
- return nil
-}
-
-// Duration looks up the value of a local DurationFlag, returns
-// 0 if not found
-func (c *Context) Duration(name string) time.Duration {
- if fs := lookupFlagSet(name, c); fs != nil {
- return lookupDuration(name, fs)
- }
- return 0
-}
-
-func lookupDuration(name string, set *flag.FlagSet) time.Duration {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := time.ParseDuration(f.Value.String())
- if err != nil {
- return 0
- }
- return parsed
- }
- return 0
-}
diff --git a/vendor/github.com/urfave/cli/v2/flag_float64.go b/vendor/github.com/urfave/cli/v2/flag_float64.go
deleted file mode 100644
index 91c778c87..000000000
--- a/vendor/github.com/urfave/cli/v2/flag_float64.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package cli
-
-import (
- "flag"
- "fmt"
- "strconv"
-)
-
-// Float64Flag is a flag with type float64
-type Float64Flag struct {
- Name string
- Aliases []string
- Usage string
- EnvVars []string
- FilePath string
- Required bool
- Hidden bool
- Value float64
- DefaultText string
- Destination *float64
- HasBeenSet bool
-}
-
-// IsSet returns whether or not the flag has been set through env or file
-func (f *Float64Flag) IsSet() bool {
- return f.HasBeenSet
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f *Float64Flag) String() string {
- return FlagStringer(f)
-}
-
-// Names returns the names of the flag
-func (f *Float64Flag) Names() []string {
- return flagNames(f.Name, f.Aliases)
-}
-
-// IsRequired returns whether or not the flag is required
-func (f *Float64Flag) IsRequired() bool {
- return f.Required
-}
-
-// TakesValue returns true of the flag takes a value, otherwise false
-func (f *Float64Flag) TakesValue() bool {
- return true
-}
-
-// GetUsage returns the usage string for the flag
-func (f *Float64Flag) GetUsage() string {
- return f.Usage
-}
-
-// GetValue returns the flags value as string representation and an empty
-// string if the flag takes no value at all.
-func (f *Float64Flag) GetValue() string {
- return fmt.Sprintf("%f", f.Value)
-}
-
-// Apply populates the flag given the flag set and environment
-func (f *Float64Flag) Apply(set *flag.FlagSet) error {
- if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {
- if val != "" {
- valFloat, err := strconv.ParseFloat(val, 10)
-
- if err != nil {
- return fmt.Errorf("could not parse %q as float64 value for flag %s: %s", val, f.Name, err)
- }
-
- f.Value = valFloat
- f.HasBeenSet = true
- }
- }
-
- for _, name := range f.Names() {
- if f.Destination != nil {
- set.Float64Var(f.Destination, name, f.Value, f.Usage)
- continue
- }
- set.Float64(name, f.Value, f.Usage)
- }
-
- return nil
-}
-
-// Float64 looks up the value of a local Float64Flag, returns
-// 0 if not found
-func (c *Context) Float64(name string) float64 {
- if fs := lookupFlagSet(name, c); fs != nil {
- return lookupFloat64(name, fs)
- }
- return 0
-}
-
-func lookupFloat64(name string, set *flag.FlagSet) float64 {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := strconv.ParseFloat(f.Value.String(), 64)
- if err != nil {
- return 0
- }
- return parsed
- }
- return 0
-}
diff --git a/vendor/github.com/urfave/cli/v2/flag_float64_slice.go b/vendor/github.com/urfave/cli/v2/flag_float64_slice.go
deleted file mode 100644
index 706ee6cd4..000000000
--- a/vendor/github.com/urfave/cli/v2/flag_float64_slice.go
+++ /dev/null
@@ -1,163 +0,0 @@
-package cli
-
-import (
- "encoding/json"
- "flag"
- "fmt"
- "strconv"
- "strings"
-)
-
-// Float64Slice wraps []float64 to satisfy flag.Value
-type Float64Slice struct {
- slice []float64
- hasBeenSet bool
-}
-
-// NewFloat64Slice makes a *Float64Slice with default values
-func NewFloat64Slice(defaults ...float64) *Float64Slice {
- return &Float64Slice{slice: append([]float64{}, defaults...)}
-}
-
-// Set parses the value into a float64 and appends it to the list of values
-func (f *Float64Slice) Set(value string) error {
- if !f.hasBeenSet {
- f.slice = []float64{}
- f.hasBeenSet = true
- }
-
- if strings.HasPrefix(value, slPfx) {
- // Deserializing assumes overwrite
- _ = json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &f.slice)
- f.hasBeenSet = true
- return nil
- }
-
- tmp, err := strconv.ParseFloat(value, 64)
- if err != nil {
- return err
- }
-
- f.slice = append(f.slice, tmp)
- return nil
-}
-
-// String returns a readable representation of this value (for usage defaults)
-func (f *Float64Slice) String() string {
- return fmt.Sprintf("%#v", f.slice)
-}
-
-// Serialize allows Float64Slice to fulfill Serializer
-func (f *Float64Slice) Serialize() string {
- jsonBytes, _ := json.Marshal(f.slice)
- return fmt.Sprintf("%s%s", slPfx, string(jsonBytes))
-}
-
-// Value returns the slice of float64s set by this flag
-func (f *Float64Slice) Value() []float64 {
- return f.slice
-}
-
-// Get returns the slice of float64s set by this flag
-func (f *Float64Slice) Get() interface{} {
- return *f
-}
-
-// Float64SliceFlag is a flag with type *Float64Slice
-type Float64SliceFlag struct {
- Name string
- Aliases []string
- Usage string
- EnvVars []string
- FilePath string
- Required bool
- Hidden bool
- Value *Float64Slice
- DefaultText string
- HasBeenSet bool
-}
-
-// IsSet returns whether or not the flag has been set through env or file
-func (f *Float64SliceFlag) IsSet() bool {
- return f.HasBeenSet
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f *Float64SliceFlag) String() string {
- return FlagStringer(f)
-}
-
-// Names returns the names of the flag
-func (f *Float64SliceFlag) Names() []string {
- return flagNames(f.Name, f.Aliases)
-}
-
-// IsRequired returns whether or not the flag is required
-func (f *Float64SliceFlag) IsRequired() bool {
- return f.Required
-}
-
-// TakesValue returns true if the flag takes a value, otherwise false
-func (f *Float64SliceFlag) TakesValue() bool {
- return true
-}
-
-// GetUsage returns the usage string for the flag
-func (f *Float64SliceFlag) GetUsage() string {
- return f.Usage
-}
-
-// GetValue returns the flags value as string representation and an empty
-// string if the flag takes no value at all.
-func (f *Float64SliceFlag) GetValue() string {
- if f.Value != nil {
- return f.Value.String()
- }
- return ""
-}
-
-// Apply populates the flag given the flag set and environment
-func (f *Float64SliceFlag) Apply(set *flag.FlagSet) error {
- if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {
- if val != "" {
- f.Value = &Float64Slice{}
-
- for _, s := range strings.Split(val, ",") {
- if err := f.Value.Set(strings.TrimSpace(s)); err != nil {
- return fmt.Errorf("could not parse %q as float64 slice value for flag %s: %s", f.Value, f.Name, err)
- }
- }
-
- f.HasBeenSet = true
- }
- }
-
- for _, name := range f.Names() {
- if f.Value == nil {
- f.Value = &Float64Slice{}
- }
- set.Var(f.Value, name, f.Usage)
- }
-
- return nil
-}
-
-// Float64Slice looks up the value of a local Float64SliceFlag, returns
-// nil if not found
-func (c *Context) Float64Slice(name string) []float64 {
- if fs := lookupFlagSet(name, c); fs != nil {
- return lookupFloat64Slice(name, fs)
- }
- return nil
-}
-
-func lookupFloat64Slice(name string, set *flag.FlagSet) []float64 {
- f := set.Lookup(name)
- if f != nil {
- if slice, ok := f.Value.(*Float64Slice); ok {
- return slice.Value()
- }
- }
- return nil
-}
diff --git a/vendor/github.com/urfave/cli/v2/flag_generic.go b/vendor/github.com/urfave/cli/v2/flag_generic.go
deleted file mode 100644
index b0c8ff44d..000000000
--- a/vendor/github.com/urfave/cli/v2/flag_generic.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package cli
-
-import (
- "flag"
- "fmt"
-)
-
-// Generic is a generic parseable type identified by a specific flag
-type Generic interface {
- Set(value string) error
- String() string
-}
-
-// GenericFlag is a flag with type Generic
-type GenericFlag struct {
- Name string
- Aliases []string
- Usage string
- EnvVars []string
- FilePath string
- Required bool
- Hidden bool
- TakesFile bool
- Value Generic
- DefaultText string
- HasBeenSet bool
-}
-
-// IsSet returns whether or not the flag has been set through env or file
-func (f *GenericFlag) IsSet() bool {
- return f.HasBeenSet
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f *GenericFlag) String() string {
- return FlagStringer(f)
-}
-
-// Names returns the names of the flag
-func (f *GenericFlag) Names() []string {
- return flagNames(f.Name, f.Aliases)
-}
-
-// IsRequired returns whether or not the flag is required
-func (f *GenericFlag) IsRequired() bool {
- return f.Required
-}
-
-// TakesValue returns true of the flag takes a value, otherwise false
-func (f *GenericFlag) TakesValue() bool {
- return true
-}
-
-// GetUsage returns the usage string for the flag
-func (f *GenericFlag) GetUsage() string {
- return f.Usage
-}
-
-// GetValue returns the flags value as string representation and an empty
-// string if the flag takes no value at all.
-func (f *GenericFlag) GetValue() string {
- if f.Value != nil {
- return f.Value.String()
- }
- return ""
-}
-
-// Apply takes the flagset and calls Set on the generic flag with the value
-// provided by the user for parsing by the flag
-func (f GenericFlag) Apply(set *flag.FlagSet) error {
- if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {
- if val != "" {
- if err := f.Value.Set(val); err != nil {
- return fmt.Errorf("could not parse %q as value for flag %s: %s", val, f.Name, err)
- }
-
- f.HasBeenSet = true
- }
- }
-
- for _, name := range f.Names() {
- set.Var(f.Value, name, f.Usage)
- }
-
- return nil
-}
-
-// Generic looks up the value of a local GenericFlag, returns
-// nil if not found
-func (c *Context) Generic(name string) interface{} {
- if fs := lookupFlagSet(name, c); fs != nil {
- return lookupGeneric(name, fs)
- }
- return nil
-}
-
-func lookupGeneric(name string, set *flag.FlagSet) interface{} {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := f.Value, error(nil)
- if err != nil {
- return nil
- }
- return parsed
- }
- return nil
-}
diff --git a/vendor/github.com/urfave/cli/v2/flag_int.go b/vendor/github.com/urfave/cli/v2/flag_int.go
deleted file mode 100644
index ac39d4a9e..000000000
--- a/vendor/github.com/urfave/cli/v2/flag_int.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package cli
-
-import (
- "flag"
- "fmt"
- "strconv"
-)
-
-// IntFlag is a flag with type int
-type IntFlag struct {
- Name string
- Aliases []string
- Usage string
- EnvVars []string
- FilePath string
- Required bool
- Hidden bool
- Value int
- DefaultText string
- Destination *int
- HasBeenSet bool
-}
-
-// IsSet returns whether or not the flag has been set through env or file
-func (f *IntFlag) IsSet() bool {
- return f.HasBeenSet
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f *IntFlag) String() string {
- return FlagStringer(f)
-}
-
-// Names returns the names of the flag
-func (f *IntFlag) Names() []string {
- return flagNames(f.Name, f.Aliases)
-}
-
-// IsRequired returns whether or not the flag is required
-func (f *IntFlag) IsRequired() bool {
- return f.Required
-}
-
-// TakesValue returns true of the flag takes a value, otherwise false
-func (f *IntFlag) TakesValue() bool {
- return true
-}
-
-// GetUsage returns the usage string for the flag
-func (f *IntFlag) GetUsage() string {
- return f.Usage
-}
-
-// GetValue returns the flags value as string representation and an empty
-// string if the flag takes no value at all.
-func (f *IntFlag) GetValue() string {
- return fmt.Sprintf("%d", f.Value)
-}
-
-// Apply populates the flag given the flag set and environment
-func (f *IntFlag) Apply(set *flag.FlagSet) error {
- if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {
- if val != "" {
- valInt, err := strconv.ParseInt(val, 0, 64)
-
- if err != nil {
- return fmt.Errorf("could not parse %q as int value for flag %s: %s", val, f.Name, err)
- }
-
- f.Value = int(valInt)
- f.HasBeenSet = true
- }
- }
-
- for _, name := range f.Names() {
- if f.Destination != nil {
- set.IntVar(f.Destination, name, f.Value, f.Usage)
- continue
- }
- set.Int(name, f.Value, f.Usage)
- }
-
- return nil
-}
-
-// Int looks up the value of a local IntFlag, returns
-// 0 if not found
-func (c *Context) Int(name string) int {
- if fs := lookupFlagSet(name, c); fs != nil {
- return lookupInt(name, fs)
- }
- return 0
-}
-
-func lookupInt(name string, set *flag.FlagSet) int {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := strconv.ParseInt(f.Value.String(), 0, 64)
- if err != nil {
- return 0
- }
- return int(parsed)
- }
- return 0
-}
diff --git a/vendor/github.com/urfave/cli/v2/flag_int64.go b/vendor/github.com/urfave/cli/v2/flag_int64.go
deleted file mode 100644
index e09991269..000000000
--- a/vendor/github.com/urfave/cli/v2/flag_int64.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package cli
-
-import (
- "flag"
- "fmt"
- "strconv"
-)
-
-// Int64Flag is a flag with type int64
-type Int64Flag struct {
- Name string
- Aliases []string
- Usage string
- EnvVars []string
- FilePath string
- Required bool
- Hidden bool
- Value int64
- DefaultText string
- Destination *int64
- HasBeenSet bool
-}
-
-// IsSet returns whether or not the flag has been set through env or file
-func (f *Int64Flag) IsSet() bool {
- return f.HasBeenSet
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f *Int64Flag) String() string {
- return FlagStringer(f)
-}
-
-// Names returns the names of the flag
-func (f *Int64Flag) Names() []string {
- return flagNames(f.Name, f.Aliases)
-}
-
-// IsRequired returns whether or not the flag is required
-func (f *Int64Flag) IsRequired() bool {
- return f.Required
-}
-
-// TakesValue returns true of the flag takes a value, otherwise false
-func (f *Int64Flag) TakesValue() bool {
- return true
-}
-
-// GetUsage returns the usage string for the flag
-func (f *Int64Flag) GetUsage() string {
- return f.Usage
-}
-
-// GetValue returns the flags value as string representation and an empty
-// string if the flag takes no value at all.
-func (f *Int64Flag) GetValue() string {
- return fmt.Sprintf("%d", f.Value)
-}
-
-// Apply populates the flag given the flag set and environment
-func (f *Int64Flag) Apply(set *flag.FlagSet) error {
- if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {
- if val != "" {
- valInt, err := strconv.ParseInt(val, 0, 64)
-
- if err != nil {
- return fmt.Errorf("could not parse %q as int value for flag %s: %s", val, f.Name, err)
- }
-
- f.Value = valInt
- f.HasBeenSet = true
- }
- }
-
- for _, name := range f.Names() {
- if f.Destination != nil {
- set.Int64Var(f.Destination, name, f.Value, f.Usage)
- continue
- }
- set.Int64(name, f.Value, f.Usage)
- }
- return nil
-}
-
-// Int64 looks up the value of a local Int64Flag, returns
-// 0 if not found
-func (c *Context) Int64(name string) int64 {
- if fs := lookupFlagSet(name, c); fs != nil {
- return lookupInt64(name, fs)
- }
- return 0
-}
-
-func lookupInt64(name string, set *flag.FlagSet) int64 {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := strconv.ParseInt(f.Value.String(), 0, 64)
- if err != nil {
- return 0
- }
- return parsed
- }
- return 0
-}
diff --git a/vendor/github.com/urfave/cli/v2/flag_int64_slice.go b/vendor/github.com/urfave/cli/v2/flag_int64_slice.go
deleted file mode 100644
index 6c7fd9376..000000000
--- a/vendor/github.com/urfave/cli/v2/flag_int64_slice.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package cli
-
-import (
- "encoding/json"
- "flag"
- "fmt"
- "strconv"
- "strings"
-)
-
-// Int64Slice wraps []int64 to satisfy flag.Value
-type Int64Slice struct {
- slice []int64
- hasBeenSet bool
-}
-
-// NewInt64Slice makes an *Int64Slice with default values
-func NewInt64Slice(defaults ...int64) *Int64Slice {
- return &Int64Slice{slice: append([]int64{}, defaults...)}
-}
-
-// Set parses the value into an integer and appends it to the list of values
-func (i *Int64Slice) Set(value string) error {
- if !i.hasBeenSet {
- i.slice = []int64{}
- i.hasBeenSet = true
- }
-
- if strings.HasPrefix(value, slPfx) {
- // Deserializing assumes overwrite
- _ = json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &i.slice)
- i.hasBeenSet = true
- return nil
- }
-
- tmp, err := strconv.ParseInt(value, 0, 64)
- if err != nil {
- return err
- }
-
- i.slice = append(i.slice, tmp)
-
- return nil
-}
-
-// String returns a readable representation of this value (for usage defaults)
-func (i *Int64Slice) String() string {
- return fmt.Sprintf("%#v", i.slice)
-}
-
-// Serialize allows Int64Slice to fulfill Serializer
-func (i *Int64Slice) Serialize() string {
- jsonBytes, _ := json.Marshal(i.slice)
- return fmt.Sprintf("%s%s", slPfx, string(jsonBytes))
-}
-
-// Value returns the slice of ints set by this flag
-func (i *Int64Slice) Value() []int64 {
- return i.slice
-}
-
-// Get returns the slice of ints set by this flag
-func (i *Int64Slice) Get() interface{} {
- return *i
-}
-
-// Int64SliceFlag is a flag with type *Int64Slice
-type Int64SliceFlag struct {
- Name string
- Aliases []string
- Usage string
- EnvVars []string
- FilePath string
- Required bool
- Hidden bool
- Value *Int64Slice
- DefaultText string
- HasBeenSet bool
-}
-
-// IsSet returns whether or not the flag has been set through env or file
-func (f *Int64SliceFlag) IsSet() bool {
- return f.HasBeenSet
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f *Int64SliceFlag) String() string {
- return FlagStringer(f)
-}
-
-// Names returns the names of the flag
-func (f *Int64SliceFlag) Names() []string {
- return flagNames(f.Name, f.Aliases)
-}
-
-// IsRequired returns whether or not the flag is required
-func (f *Int64SliceFlag) IsRequired() bool {
- return f.Required
-}
-
-// TakesValue returns true of the flag takes a value, otherwise false
-func (f *Int64SliceFlag) TakesValue() bool {
- return true
-}
-
-// GetUsage returns the usage string for the flag
-func (f Int64SliceFlag) GetUsage() string {
- return f.Usage
-}
-
-// GetValue returns the flags value as string representation and an empty
-// string if the flag takes no value at all.
-func (f *Int64SliceFlag) GetValue() string {
- if f.Value != nil {
- return f.Value.String()
- }
- return ""
-}
-
-// Apply populates the flag given the flag set and environment
-func (f *Int64SliceFlag) Apply(set *flag.FlagSet) error {
- if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {
- f.Value = &Int64Slice{}
-
- for _, s := range strings.Split(val, ",") {
- if err := f.Value.Set(strings.TrimSpace(s)); err != nil {
- return fmt.Errorf("could not parse %q as int64 slice value for flag %s: %s", val, f.Name, err)
- }
- }
-
- f.HasBeenSet = true
- }
-
- for _, name := range f.Names() {
- if f.Value == nil {
- f.Value = &Int64Slice{}
- }
- set.Var(f.Value, name, f.Usage)
- }
-
- return nil
-}
-
-// Int64Slice looks up the value of a local Int64SliceFlag, returns
-// nil if not found
-func (c *Context) Int64Slice(name string) []int64 {
- return lookupInt64Slice(name, c.flagSet)
-}
-
-func lookupInt64Slice(name string, set *flag.FlagSet) []int64 {
- f := set.Lookup(name)
- if f != nil {
- if slice, ok := f.Value.(*Int64Slice); ok {
- return slice.Value()
- }
- }
- return nil
-}
diff --git a/vendor/github.com/urfave/cli/v2/flag_int_slice.go b/vendor/github.com/urfave/cli/v2/flag_int_slice.go
deleted file mode 100644
index 4e0afc021..000000000
--- a/vendor/github.com/urfave/cli/v2/flag_int_slice.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package cli
-
-import (
- "encoding/json"
- "flag"
- "fmt"
- "strconv"
- "strings"
-)
-
-// IntSlice wraps []int to satisfy flag.Value
-type IntSlice struct {
- slice []int
- hasBeenSet bool
-}
-
-// NewIntSlice makes an *IntSlice with default values
-func NewIntSlice(defaults ...int) *IntSlice {
- return &IntSlice{slice: append([]int{}, defaults...)}
-}
-
-// TODO: Consistently have specific Set function for Int64 and Float64 ?
-// SetInt directly adds an integer to the list of values
-func (i *IntSlice) SetInt(value int) {
- if !i.hasBeenSet {
- i.slice = []int{}
- i.hasBeenSet = true
- }
-
- i.slice = append(i.slice, value)
-}
-
-// Set parses the value into an integer and appends it to the list of values
-func (i *IntSlice) Set(value string) error {
- if !i.hasBeenSet {
- i.slice = []int{}
- i.hasBeenSet = true
- }
-
- if strings.HasPrefix(value, slPfx) {
- // Deserializing assumes overwrite
- _ = json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &i.slice)
- i.hasBeenSet = true
- return nil
- }
-
- tmp, err := strconv.ParseInt(value, 0, 64)
- if err != nil {
- return err
- }
-
- i.slice = append(i.slice, int(tmp))
-
- return nil
-}
-
-// String returns a readable representation of this value (for usage defaults)
-func (i *IntSlice) String() string {
- return fmt.Sprintf("%#v", i.slice)
-}
-
-// Serialize allows IntSlice to fulfill Serializer
-func (i *IntSlice) Serialize() string {
- jsonBytes, _ := json.Marshal(i.slice)
- return fmt.Sprintf("%s%s", slPfx, string(jsonBytes))
-}
-
-// Value returns the slice of ints set by this flag
-func (i *IntSlice) Value() []int {
- return i.slice
-}
-
-// Get returns the slice of ints set by this flag
-func (i *IntSlice) Get() interface{} {
- return *i
-}
-
-// IntSliceFlag is a flag with type *IntSlice
-type IntSliceFlag struct {
- Name string
- Aliases []string
- Usage string
- EnvVars []string
- FilePath string
- Required bool
- Hidden bool
- Value *IntSlice
- DefaultText string
- HasBeenSet bool
-}
-
-// IsSet returns whether or not the flag has been set through env or file
-func (f *IntSliceFlag) IsSet() bool {
- return f.HasBeenSet
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f *IntSliceFlag) String() string {
- return FlagStringer(f)
-}
-
-// Names returns the names of the flag
-func (f *IntSliceFlag) Names() []string {
- return flagNames(f.Name, f.Aliases)
-}
-
-// IsRequired returns whether or not the flag is required
-func (f *IntSliceFlag) IsRequired() bool {
- return f.Required
-}
-
-// TakesValue returns true of the flag takes a value, otherwise false
-func (f *IntSliceFlag) TakesValue() bool {
- return true
-}
-
-// GetUsage returns the usage string for the flag
-func (f IntSliceFlag) GetUsage() string {
- return f.Usage
-}
-
-// GetValue returns the flags value as string representation and an empty
-// string if the flag takes no value at all.
-func (f *IntSliceFlag) GetValue() string {
- if f.Value != nil {
- return f.Value.String()
- }
- return ""
-}
-
-// Apply populates the flag given the flag set and environment
-func (f *IntSliceFlag) Apply(set *flag.FlagSet) error {
- if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {
- f.Value = &IntSlice{}
-
- for _, s := range strings.Split(val, ",") {
- if err := f.Value.Set(strings.TrimSpace(s)); err != nil {
- return fmt.Errorf("could not parse %q as int slice value for flag %s: %s", val, f.Name, err)
- }
- }
-
- f.HasBeenSet = true
- }
-
- for _, name := range f.Names() {
- if f.Value == nil {
- f.Value = &IntSlice{}
- }
- set.Var(f.Value, name, f.Usage)
- }
-
- return nil
-}
-
-// IntSlice looks up the value of a local IntSliceFlag, returns
-// nil if not found
-func (c *Context) IntSlice(name string) []int {
- if fs := lookupFlagSet(name, c); fs != nil {
- return lookupIntSlice(name, c.flagSet)
- }
- return nil
-}
-
-func lookupIntSlice(name string, set *flag.FlagSet) []int {
- f := set.Lookup(name)
- if f != nil {
- if slice, ok := f.Value.(*IntSlice); ok {
- return slice.Value()
- }
- }
- return nil
-}
diff --git a/vendor/github.com/urfave/cli/v2/flag_path.go b/vendor/github.com/urfave/cli/v2/flag_path.go
deleted file mode 100644
index 8070dc4b0..000000000
--- a/vendor/github.com/urfave/cli/v2/flag_path.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package cli
-
-import "flag"
-
-type PathFlag struct {
- Name string
- Aliases []string
- Usage string
- EnvVars []string
- FilePath string
- Required bool
- Hidden bool
- TakesFile bool
- Value string
- DefaultText string
- Destination *string
- HasBeenSet bool
-}
-
-// IsSet returns whether or not the flag has been set through env or file
-func (f *PathFlag) IsSet() bool {
- return f.HasBeenSet
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f *PathFlag) String() string {
- return FlagStringer(f)
-}
-
-// Names returns the names of the flag
-func (f *PathFlag) Names() []string {
- return flagNames(f.Name, f.Aliases)
-}
-
-// IsRequired returns whether or not the flag is required
-func (f *PathFlag) IsRequired() bool {
- return f.Required
-}
-
-// TakesValue returns true of the flag takes a value, otherwise false
-func (f *PathFlag) TakesValue() bool {
- return true
-}
-
-// GetUsage returns the usage string for the flag
-func (f *PathFlag) GetUsage() string {
- return f.Usage
-}
-
-// GetValue returns the flags value as string representation and an empty
-// string if the flag takes no value at all.
-func (f *PathFlag) GetValue() string {
- return f.Value
-}
-
-// Apply populates the flag given the flag set and environment
-func (f *PathFlag) Apply(set *flag.FlagSet) error {
- if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {
- f.Value = val
- f.HasBeenSet = true
- }
-
- for _, name := range f.Names() {
- if f.Destination != nil {
- set.StringVar(f.Destination, name, f.Value, f.Usage)
- continue
- }
- set.String(name, f.Value, f.Usage)
- }
-
- return nil
-}
-
-// Path looks up the value of a local PathFlag, returns
-// "" if not found
-func (c *Context) Path(name string) string {
- if fs := lookupFlagSet(name, c); fs != nil {
- return lookupPath(name, fs)
- }
-
- return ""
-}
-
-func lookupPath(name string, set *flag.FlagSet) string {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := f.Value.String(), error(nil)
- if err != nil {
- return ""
- }
- return parsed
- }
- return ""
-}
diff --git a/vendor/github.com/urfave/cli/v2/flag_string.go b/vendor/github.com/urfave/cli/v2/flag_string.go
deleted file mode 100644
index 400bb532e..000000000
--- a/vendor/github.com/urfave/cli/v2/flag_string.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package cli
-
-import "flag"
-
-// StringFlag is a flag with type string
-type StringFlag struct {
- Name string
- Aliases []string
- Usage string
- EnvVars []string
- FilePath string
- Required bool
- Hidden bool
- TakesFile bool
- Value string
- DefaultText string
- Destination *string
- HasBeenSet bool
-}
-
-// IsSet returns whether or not the flag has been set through env or file
-func (f *StringFlag) IsSet() bool {
- return f.HasBeenSet
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f *StringFlag) String() string {
- return FlagStringer(f)
-}
-
-// Names returns the names of the flag
-func (f *StringFlag) Names() []string {
- return flagNames(f.Name, f.Aliases)
-}
-
-// IsRequired returns whether or not the flag is required
-func (f *StringFlag) IsRequired() bool {
- return f.Required
-}
-
-// TakesValue returns true of the flag takes a value, otherwise false
-func (f *StringFlag) TakesValue() bool {
- return true
-}
-
-// GetUsage returns the usage string for the flag
-func (f *StringFlag) GetUsage() string {
- return f.Usage
-}
-
-// GetValue returns the flags value as string representation and an empty
-// string if the flag takes no value at all.
-func (f *StringFlag) GetValue() string {
- return f.Value
-}
-
-// Apply populates the flag given the flag set and environment
-func (f *StringFlag) Apply(set *flag.FlagSet) error {
- if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {
- f.Value = val
- f.HasBeenSet = true
- }
-
- for _, name := range f.Names() {
- if f.Destination != nil {
- set.StringVar(f.Destination, name, f.Value, f.Usage)
- continue
- }
- set.String(name, f.Value, f.Usage)
- }
-
- return nil
-}
-
-// String looks up the value of a local StringFlag, returns
-// "" if not found
-func (c *Context) String(name string) string {
- if fs := lookupFlagSet(name, c); fs != nil {
- return lookupString(name, fs)
- }
- return ""
-}
-
-func lookupString(name string, set *flag.FlagSet) string {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := f.Value.String(), error(nil)
- if err != nil {
- return ""
- }
- return parsed
- }
- return ""
-}
diff --git a/vendor/github.com/urfave/cli/v2/flag_string_slice.go b/vendor/github.com/urfave/cli/v2/flag_string_slice.go
deleted file mode 100644
index 35497032c..000000000
--- a/vendor/github.com/urfave/cli/v2/flag_string_slice.go
+++ /dev/null
@@ -1,180 +0,0 @@
-package cli
-
-import (
- "encoding/json"
- "flag"
- "fmt"
- "strings"
-)
-
-// StringSlice wraps a []string to satisfy flag.Value
-type StringSlice struct {
- slice []string
- hasBeenSet bool
-}
-
-// NewStringSlice creates a *StringSlice with default values
-func NewStringSlice(defaults ...string) *StringSlice {
- return &StringSlice{slice: append([]string{}, defaults...)}
-}
-
-// Set appends the string value to the list of values
-func (s *StringSlice) Set(value string) error {
- if !s.hasBeenSet {
- s.slice = []string{}
- s.hasBeenSet = true
- }
-
- if strings.HasPrefix(value, slPfx) {
- // Deserializing assumes overwrite
- _ = json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &s.slice)
- s.hasBeenSet = true
- return nil
- }
-
- s.slice = append(s.slice, value)
-
- return nil
-}
-
-// String returns a readable representation of this value (for usage defaults)
-func (s *StringSlice) String() string {
- return fmt.Sprintf("%s", s.slice)
-}
-
-// Serialize allows StringSlice to fulfill Serializer
-func (s *StringSlice) Serialize() string {
- jsonBytes, _ := json.Marshal(s.slice)
- return fmt.Sprintf("%s%s", slPfx, string(jsonBytes))
-}
-
-// Value returns the slice of strings set by this flag
-func (s *StringSlice) Value() []string {
- return s.slice
-}
-
-// Get returns the slice of strings set by this flag
-func (s *StringSlice) Get() interface{} {
- return *s
-}
-
-// StringSliceFlag is a flag with type *StringSlice
-type StringSliceFlag struct {
- Name string
- Aliases []string
- Usage string
- EnvVars []string
- FilePath string
- Required bool
- Hidden bool
- TakesFile bool
- Value *StringSlice
- DefaultText string
- HasBeenSet bool
- Destination *StringSlice
-}
-
-// IsSet returns whether or not the flag has been set through env or file
-func (f *StringSliceFlag) IsSet() bool {
- return f.HasBeenSet
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f *StringSliceFlag) String() string {
- return FlagStringer(f)
-}
-
-// Names returns the names of the flag
-func (f *StringSliceFlag) Names() []string {
- return flagNames(f.Name, f.Aliases)
-}
-
-// IsRequired returns whether or not the flag is required
-func (f *StringSliceFlag) IsRequired() bool {
- return f.Required
-}
-
-// TakesValue returns true of the flag takes a value, otherwise false
-func (f *StringSliceFlag) TakesValue() bool {
- return true
-}
-
-// GetUsage returns the usage string for the flag
-func (f *StringSliceFlag) GetUsage() string {
- return f.Usage
-}
-
-// GetValue returns the flags value as string representation and an empty
-// string if the flag takes no value at all.
-func (f *StringSliceFlag) GetValue() string {
- if f.Value != nil {
- return f.Value.String()
- }
- return ""
-}
-
-// Apply populates the flag given the flag set and environment
-func (f *StringSliceFlag) Apply(set *flag.FlagSet) error {
-
- if f.Destination != nil && f.Value != nil {
- f.Destination.slice = make([]string, len(f.Value.slice))
- copy(f.Destination.slice, f.Value.slice)
-
- }
-
- if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {
- if f.Value == nil {
- f.Value = &StringSlice{}
- }
- destination := f.Value
- if f.Destination != nil {
- destination = f.Destination
- }
-
- for _, s := range strings.Split(val, ",") {
- if err := destination.Set(strings.TrimSpace(s)); err != nil {
- return fmt.Errorf("could not parse %q as string value for flag %s: %s", val, f.Name, err)
- }
- }
-
- // Set this to false so that we reset the slice if we then set values from
- // flags that have already been set by the environment.
- destination.hasBeenSet = false
- f.HasBeenSet = true
- }
-
- for _, name := range f.Names() {
- if f.Value == nil {
- f.Value = &StringSlice{}
- }
-
- if f.Destination != nil {
- set.Var(f.Destination, name, f.Usage)
- continue
- }
-
- set.Var(f.Value, name, f.Usage)
- }
-
- return nil
-}
-
-// StringSlice looks up the value of a local StringSliceFlag, returns
-// nil if not found
-func (c *Context) StringSlice(name string) []string {
- if fs := lookupFlagSet(name, c); fs != nil {
- return lookupStringSlice(name, fs)
- }
- return nil
-}
-
-func lookupStringSlice(name string, set *flag.FlagSet) []string {
- f := set.Lookup(name)
- if f != nil {
- if slice, ok := f.Value.(*StringSlice); ok {
- return slice.Value()
- }
- }
- return nil
-}
diff --git a/vendor/github.com/urfave/cli/v2/flag_timestamp.go b/vendor/github.com/urfave/cli/v2/flag_timestamp.go
deleted file mode 100644
index 0382a6b9d..000000000
--- a/vendor/github.com/urfave/cli/v2/flag_timestamp.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package cli
-
-import (
- "flag"
- "fmt"
- "time"
-)
-
-// Timestamp wrap to satisfy golang's flag interface.
-type Timestamp struct {
- timestamp *time.Time
- hasBeenSet bool
- layout string
-}
-
-// Timestamp constructor
-func NewTimestamp(timestamp time.Time) *Timestamp {
-	return &Timestamp{timestamp: &timestamp}
-}
-
-// Set the timestamp value directly
-func (t *Timestamp) SetTimestamp(value time.Time) {
- if !t.hasBeenSet {
- t.timestamp = &value
- t.hasBeenSet = true
- }
-}
-
-// Set the timestamp string layout for future parsing
-func (t *Timestamp) SetLayout(layout string) {
- t.layout = layout
-}
-
-// Parses the string value to timestamp
-func (t *Timestamp) Set(value string) error {
- timestamp, err := time.Parse(t.layout, value)
- if err != nil {
- return err
- }
-
-	t.timestamp = &timestamp
- t.hasBeenSet = true
- return nil
-}
-
-// String returns a readable representation of this value (for usage defaults)
-func (t *Timestamp) String() string {
- return fmt.Sprintf("%#v", t.timestamp)
-}
-
-// Value returns the timestamp value stored in the flag
-func (t *Timestamp) Value() *time.Time {
- return t.timestamp
-}
-
-// Get returns the flag structure
-func (t *Timestamp) Get() interface{} {
- return *t
-}
-
-// TimestampFlag is a flag with type time
-type TimestampFlag struct {
- Name string
- Aliases []string
- Usage string
- EnvVars []string
- FilePath string
- Required bool
- Hidden bool
- Layout string
- Value *Timestamp
- DefaultText string
- HasBeenSet bool
-}
-
-// IsSet returns whether or not the flag has been set through env or file
-func (f *TimestampFlag) IsSet() bool {
- return f.HasBeenSet
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f *TimestampFlag) String() string {
- return FlagStringer(f)
-}
-
-// Names returns the names of the flag
-func (f *TimestampFlag) Names() []string {
- return flagNames(f.Name, f.Aliases)
-}
-
-// IsRequired returns whether or not the flag is required
-func (f *TimestampFlag) IsRequired() bool {
- return f.Required
-}
-
-// TakesValue returns true of the flag takes a value, otherwise false
-func (f *TimestampFlag) TakesValue() bool {
- return true
-}
-
-// GetUsage returns the usage string for the flag
-func (f *TimestampFlag) GetUsage() string {
- return f.Usage
-}
-
-// GetValue returns the flags value as string representation and an empty
-// string if the flag takes no value at all.
-func (f *TimestampFlag) GetValue() string {
- if f.Value != nil {
- return f.Value.timestamp.String()
- }
- return ""
-}
-
-// Apply populates the flag given the flag set and environment
-func (f *TimestampFlag) Apply(set *flag.FlagSet) error {
- if f.Layout == "" {
- return fmt.Errorf("timestamp Layout is required")
- }
- if f.Value == nil {
- f.Value = &Timestamp{}
- }
- f.Value.SetLayout(f.Layout)
-
- if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {
- if err := f.Value.Set(val); err != nil {
- return fmt.Errorf("could not parse %q as timestamp value for flag %s: %s", val, f.Name, err)
- }
- f.HasBeenSet = true
- }
-
- for _, name := range f.Names() {
- set.Var(f.Value, name, f.Usage)
- }
- return nil
-}
-
-// Timestamp gets the timestamp from a flag name
-func (c *Context) Timestamp(name string) *time.Time {
- if fs := lookupFlagSet(name, c); fs != nil {
- return lookupTimestamp(name, fs)
- }
- return nil
-}
-
-// Fetches the timestamp value from the local timestampWrap
-func lookupTimestamp(name string, set *flag.FlagSet) *time.Time {
- f := set.Lookup(name)
- if f != nil {
- return (f.Value.(*Timestamp)).Value()
- }
- return nil
-}
diff --git a/vendor/github.com/urfave/cli/v2/flag_uint.go b/vendor/github.com/urfave/cli/v2/flag_uint.go
deleted file mode 100644
index 2e5e76b0e..000000000
--- a/vendor/github.com/urfave/cli/v2/flag_uint.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package cli
-
-import (
- "flag"
- "fmt"
- "strconv"
-)
-
-// UintFlag is a flag with type uint
-type UintFlag struct {
- Name string
- Aliases []string
- Usage string
- EnvVars []string
- FilePath string
- Required bool
- Hidden bool
- Value uint
- DefaultText string
- Destination *uint
- HasBeenSet bool
-}
-
-// IsSet returns whether or not the flag has been set through env or file
-func (f *UintFlag) IsSet() bool {
- return f.HasBeenSet
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f *UintFlag) String() string {
- return FlagStringer(f)
-}
-
-// Names returns the names of the flag
-func (f *UintFlag) Names() []string {
- return flagNames(f.Name, f.Aliases)
-}
-
-// IsRequired returns whether or not the flag is required
-func (f *UintFlag) IsRequired() bool {
- return f.Required
-}
-
-// TakesValue returns true of the flag takes a value, otherwise false
-func (f *UintFlag) TakesValue() bool {
- return true
-}
-
-// GetUsage returns the usage string for the flag
-func (f *UintFlag) GetUsage() string {
- return f.Usage
-}
-
-// Apply populates the flag given the flag set and environment
-func (f *UintFlag) Apply(set *flag.FlagSet) error {
- if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {
- if val != "" {
- valInt, err := strconv.ParseUint(val, 0, 64)
- if err != nil {
- return fmt.Errorf("could not parse %q as uint value for flag %s: %s", val, f.Name, err)
- }
-
- f.Value = uint(valInt)
- f.HasBeenSet = true
- }
- }
-
- for _, name := range f.Names() {
- if f.Destination != nil {
- set.UintVar(f.Destination, name, f.Value, f.Usage)
- continue
- }
- set.Uint(name, f.Value, f.Usage)
- }
-
- return nil
-}
-
-// GetValue returns the flags value as string representation and an empty
-// string if the flag takes no value at all.
-func (f *UintFlag) GetValue() string {
- return fmt.Sprintf("%d", f.Value)
-}
-
-// Uint looks up the value of a local UintFlag, returns
-// 0 if not found
-func (c *Context) Uint(name string) uint {
- if fs := lookupFlagSet(name, c); fs != nil {
- return lookupUint(name, fs)
- }
- return 0
-}
-
-func lookupUint(name string, set *flag.FlagSet) uint {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := strconv.ParseUint(f.Value.String(), 0, 64)
- if err != nil {
- return 0
- }
- return uint(parsed)
- }
- return 0
-}
diff --git a/vendor/github.com/urfave/cli/v2/flag_uint64.go b/vendor/github.com/urfave/cli/v2/flag_uint64.go
deleted file mode 100644
index 8fc3289d8..000000000
--- a/vendor/github.com/urfave/cli/v2/flag_uint64.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package cli
-
-import (
- "flag"
- "fmt"
- "strconv"
-)
-
-// Uint64Flag is a flag with type uint64
-type Uint64Flag struct {
- Name string
- Aliases []string
- Usage string
- EnvVars []string
- FilePath string
- Required bool
- Hidden bool
- Value uint64
- DefaultText string
- Destination *uint64
- HasBeenSet bool
-}
-
-// IsSet returns whether or not the flag has been set through env or file
-func (f *Uint64Flag) IsSet() bool {
- return f.HasBeenSet
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f *Uint64Flag) String() string {
- return FlagStringer(f)
-}
-
-// Names returns the names of the flag
-func (f *Uint64Flag) Names() []string {
- return flagNames(f.Name, f.Aliases)
-}
-
-// IsRequired returns whether or not the flag is required
-func (f *Uint64Flag) IsRequired() bool {
- return f.Required
-}
-
-// TakesValue returns true of the flag takes a value, otherwise false
-func (f *Uint64Flag) TakesValue() bool {
- return true
-}
-
-// GetUsage returns the usage string for the flag
-func (f *Uint64Flag) GetUsage() string {
- return f.Usage
-}
-
-// Apply populates the flag given the flag set and environment
-func (f *Uint64Flag) Apply(set *flag.FlagSet) error {
- if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {
- if val != "" {
- valInt, err := strconv.ParseUint(val, 0, 64)
- if err != nil {
- return fmt.Errorf("could not parse %q as uint64 value for flag %s: %s", val, f.Name, err)
- }
-
- f.Value = valInt
- f.HasBeenSet = true
- }
- }
-
- for _, name := range f.Names() {
- if f.Destination != nil {
- set.Uint64Var(f.Destination, name, f.Value, f.Usage)
- continue
- }
- set.Uint64(name, f.Value, f.Usage)
- }
-
- return nil
-}
-
-// GetValue returns the flags value as string representation and an empty
-// string if the flag takes no value at all.
-func (f *Uint64Flag) GetValue() string {
- return fmt.Sprintf("%d", f.Value)
-}
-
-// Uint64 looks up the value of a local Uint64Flag, returns
-// 0 if not found
-func (c *Context) Uint64(name string) uint64 {
- if fs := lookupFlagSet(name, c); fs != nil {
- return lookupUint64(name, fs)
- }
- return 0
-}
-
-func lookupUint64(name string, set *flag.FlagSet) uint64 {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := strconv.ParseUint(f.Value.String(), 0, 64)
- if err != nil {
- return 0
- }
- return parsed
- }
- return 0
-}
diff --git a/vendor/github.com/urfave/cli/v2/funcs.go b/vendor/github.com/urfave/cli/v2/funcs.go
deleted file mode 100644
index 474c48faf..000000000
--- a/vendor/github.com/urfave/cli/v2/funcs.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package cli
-
-// BashCompleteFunc is an action to execute when the shell completion flag is set
-type BashCompleteFunc func(*Context)
-
-// BeforeFunc is an action to execute before any subcommands are run, but after
-// the context is ready if a non-nil error is returned, no subcommands are run
-type BeforeFunc func(*Context) error
-
-// AfterFunc is an action to execute after any subcommands are run, but after the
-// subcommand has finished. It is run even if Action() panics
-type AfterFunc func(*Context) error
-
-// ActionFunc is the action to execute when no subcommands are specified
-type ActionFunc func(*Context) error
-
-// CommandNotFoundFunc is executed if the proper command cannot be found
-type CommandNotFoundFunc func(*Context, string)
-
-// OnUsageErrorFunc is executed if a usage error occurs. This is useful for displaying
-// customized usage error messages. This function is able to replace the
-// original error messages. If this function is not set, the "Incorrect usage"
-// is displayed and the execution is interrupted.
-type OnUsageErrorFunc func(context *Context, err error, isSubcommand bool) error
-
-// ExitErrHandlerFunc is executed if provided in order to handle exitError values
-// returned by Actions and Before/After functions.
-type ExitErrHandlerFunc func(context *Context, err error)
-
-// FlagStringFunc is used by the help generation to display a flag, which is
-// expected to be a single line.
-type FlagStringFunc func(Flag) string
-
-// FlagNamePrefixFunc is used by the default FlagStringFunc to create prefix
-// text for a flag's full name.
-type FlagNamePrefixFunc func(fullName []string, placeholder string) string
-
-// FlagEnvHintFunc is used by the default FlagStringFunc to annotate flag help
-// with the environment variable details.
-type FlagEnvHintFunc func(envVars []string, str string) string
-
-// FlagFileHintFunc is used by the default FlagStringFunc to annotate flag help
-// with the file path details.
-type FlagFileHintFunc func(filePath, str string) string
diff --git a/vendor/github.com/urfave/cli/v2/help.go b/vendor/github.com/urfave/cli/v2/help.go
deleted file mode 100644
index 0a421ee99..000000000
--- a/vendor/github.com/urfave/cli/v2/help.go
+++ /dev/null
@@ -1,386 +0,0 @@
-package cli
-
-import (
- "fmt"
- "io"
- "os"
- "strings"
- "text/tabwriter"
- "text/template"
- "unicode/utf8"
-)
-
-var helpCommand = &Command{
- Name: "help",
- Aliases: []string{"h"},
- Usage: "Shows a list of commands or help for one command",
- ArgsUsage: "[command]",
- Action: func(c *Context) error {
- args := c.Args()
- if args.Present() {
- return ShowCommandHelp(c, args.First())
- }
-
- _ = ShowAppHelp(c)
- return nil
- },
-}
-
-var helpSubcommand = &Command{
- Name: "help",
- Aliases: []string{"h"},
- Usage: "Shows a list of commands or help for one command",
- ArgsUsage: "[command]",
- Action: func(c *Context) error {
- args := c.Args()
- if args.Present() {
- return ShowCommandHelp(c, args.First())
- }
-
- return ShowSubcommandHelp(c)
- },
-}
-
-// Prints help for the App or Command
-type helpPrinter func(w io.Writer, templ string, data interface{})
-
-// Prints help for the App or Command with custom template function.
-type helpPrinterCustom func(w io.Writer, templ string, data interface{}, customFunc map[string]interface{})
-
-// HelpPrinter is a function that writes the help output. If not set explicitly,
-// this calls HelpPrinterCustom using only the default template functions.
-//
-// If custom logic for printing help is required, this function can be
-// overridden. If the ExtraInfo field is defined on an App, this function
-// should not be modified, as HelpPrinterCustom will be used directly in order
-// to capture the extra information.
-var HelpPrinter helpPrinter = printHelp
-
-// HelpPrinterCustom is a function that writes the help output. It is used as
-// the default implementation of HelpPrinter, and may be called directly if
-// the ExtraInfo field is set on an App.
-var HelpPrinterCustom helpPrinterCustom = printHelpCustom
-
-// VersionPrinter prints the version for the App
-var VersionPrinter = printVersion
-
-// ShowAppHelpAndExit - Prints the list of subcommands for the app and exits with exit code.
-func ShowAppHelpAndExit(c *Context, exitCode int) {
- _ = ShowAppHelp(c)
- os.Exit(exitCode)
-}
-
-// ShowAppHelp is an action that displays the help.
-func ShowAppHelp(c *Context) error {
- tpl := c.App.CustomAppHelpTemplate
- if tpl == "" {
- tpl = AppHelpTemplate
- }
-
- if c.App.ExtraInfo == nil {
- HelpPrinter(c.App.Writer, tpl, c.App)
- return nil
- }
-
- customAppData := func() map[string]interface{} {
- return map[string]interface{}{
- "ExtraInfo": c.App.ExtraInfo,
- }
- }
- HelpPrinterCustom(c.App.Writer, tpl, c.App, customAppData())
-
- return nil
-}
-
-// DefaultAppComplete prints the list of subcommands as the default app completion method
-func DefaultAppComplete(c *Context) {
- DefaultCompleteWithFlags(nil)(c)
-}
-
-func printCommandSuggestions(commands []*Command, writer io.Writer) {
- for _, command := range commands {
- if command.Hidden {
- continue
- }
- if os.Getenv("_CLI_ZSH_AUTOCOMPLETE_HACK") == "1" {
- for _, name := range command.Names() {
- _, _ = fmt.Fprintf(writer, "%s:%s\n", name, command.Usage)
- }
- } else {
- for _, name := range command.Names() {
- _, _ = fmt.Fprintf(writer, "%s\n", name)
- }
- }
- }
-}
-
-func cliArgContains(flagName string) bool {
- for _, name := range strings.Split(flagName, ",") {
- name = strings.TrimSpace(name)
- count := utf8.RuneCountInString(name)
- if count > 2 {
- count = 2
- }
- flag := fmt.Sprintf("%s%s", strings.Repeat("-", count), name)
- for _, a := range os.Args {
- if a == flag {
- return true
- }
- }
- }
- return false
-}
-
-func printFlagSuggestions(lastArg string, flags []Flag, writer io.Writer) {
- cur := strings.TrimPrefix(lastArg, "-")
- cur = strings.TrimPrefix(cur, "-")
- for _, flag := range flags {
- if bflag, ok := flag.(*BoolFlag); ok && bflag.Hidden {
- continue
- }
- for _, name := range flag.Names() {
- name = strings.TrimSpace(name)
- // this will get total count utf8 letters in flag name
- count := utf8.RuneCountInString(name)
- if count > 2 {
- count = 2 // resuse this count to generate single - or -- in flag completion
- }
- // if flag name has more than one utf8 letter and last argument in cli has -- prefix then
- // skip flag completion for short flags example -v or -x
- if strings.HasPrefix(lastArg, "--") && count == 1 {
- continue
- }
- // match if last argument matches this flag and it is not repeated
- if strings.HasPrefix(name, cur) && cur != name && !cliArgContains(name) {
- flagCompletion := fmt.Sprintf("%s%s", strings.Repeat("-", count), name)
- _, _ = fmt.Fprintln(writer, flagCompletion)
- }
- }
- }
-}
-
-func DefaultCompleteWithFlags(cmd *Command) func(c *Context) {
- return func(c *Context) {
- if len(os.Args) > 2 {
- lastArg := os.Args[len(os.Args)-2]
- if strings.HasPrefix(lastArg, "-") {
- printFlagSuggestions(lastArg, c.App.Flags, c.App.Writer)
- if cmd != nil {
- printFlagSuggestions(lastArg, cmd.Flags, c.App.Writer)
- }
- return
- }
- }
- if cmd != nil {
- printCommandSuggestions(cmd.Subcommands, c.App.Writer)
- } else {
- printCommandSuggestions(c.App.Commands, c.App.Writer)
- }
- }
-}
-
-// ShowCommandHelpAndExit - exits with code after showing help
-func ShowCommandHelpAndExit(c *Context, command string, code int) {
- _ = ShowCommandHelp(c, command)
- os.Exit(code)
-}
-
-// ShowCommandHelp prints help for the given command
-func ShowCommandHelp(ctx *Context, command string) error {
- // show the subcommand help for a command with subcommands
- if command == "" {
- HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App)
- return nil
- }
-
- for _, c := range ctx.App.Commands {
- if c.HasName(command) {
- templ := c.CustomHelpTemplate
- if templ == "" {
- templ = CommandHelpTemplate
- }
-
- HelpPrinter(ctx.App.Writer, templ, c)
-
- return nil
- }
- }
-
- if ctx.App.CommandNotFound == nil {
- return Exit(fmt.Sprintf("No help topic for '%v'", command), 3)
- }
-
- ctx.App.CommandNotFound(ctx, command)
- return nil
-}
-
-// ShowSubcommandHelpAndExit - Prints help for the given subcommand and exits with exit code.
-func ShowSubcommandHelpAndExit(c *Context, exitCode int) {
- _ = ShowSubcommandHelp(c)
- os.Exit(exitCode)
-}
-
-// ShowSubcommandHelp prints help for the given subcommand
-func ShowSubcommandHelp(c *Context) error {
- if c == nil {
- return nil
- }
-
- if c.Command != nil {
- return ShowCommandHelp(c, c.Command.Name)
- }
-
- return ShowCommandHelp(c, "")
-}
-
-// ShowVersion prints the version number of the App
-func ShowVersion(c *Context) {
- VersionPrinter(c)
-}
-
-func printVersion(c *Context) {
- _, _ = fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version)
-}
-
-// ShowCompletions prints the lists of commands within a given context
-func ShowCompletions(c *Context) {
- a := c.App
- if a != nil && a.BashComplete != nil {
- a.BashComplete(c)
- }
-}
-
-// ShowCommandCompletions prints the custom completions for a given command
-func ShowCommandCompletions(ctx *Context, command string) {
- c := ctx.App.Command(command)
- if c != nil {
- if c.BashComplete != nil {
- c.BashComplete(ctx)
- } else {
- DefaultCompleteWithFlags(c)(ctx)
- }
- }
-
-}
-
-// printHelpCustom is the default implementation of HelpPrinterCustom.
-//
-// The customFuncs map will be combined with a default template.FuncMap to
-// allow using arbitrary functions in template rendering.
-func printHelpCustom(out io.Writer, templ string, data interface{}, customFuncs map[string]interface{}) {
- funcMap := template.FuncMap{
- "join": strings.Join,
- "indent": indent,
- "nindent": nindent,
- "trim": strings.TrimSpace,
- }
- for key, value := range customFuncs {
- funcMap[key] = value
- }
-
- w := tabwriter.NewWriter(out, 1, 8, 2, ' ', 0)
- t := template.Must(template.New("help").Funcs(funcMap).Parse(templ))
-
- err := t.Execute(w, data)
- if err != nil {
- // If the writer is closed, t.Execute will fail, and there's nothing
- // we can do to recover.
- if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" {
- _, _ = fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err)
- }
- return
- }
- _ = w.Flush()
-}
-
-func printHelp(out io.Writer, templ string, data interface{}) {
- HelpPrinterCustom(out, templ, data, nil)
-}
-
-func checkVersion(c *Context) bool {
- found := false
- for _, name := range VersionFlag.Names() {
- if c.Bool(name) {
- found = true
- }
- }
- return found
-}
-
-func checkHelp(c *Context) bool {
- found := false
- for _, name := range HelpFlag.Names() {
- if c.Bool(name) {
- found = true
- }
- }
- return found
-}
-
-func checkCommandHelp(c *Context, name string) bool {
- if c.Bool("h") || c.Bool("help") {
- _ = ShowCommandHelp(c, name)
- return true
- }
-
- return false
-}
-
-func checkSubcommandHelp(c *Context) bool {
- if c.Bool("h") || c.Bool("help") {
- _ = ShowSubcommandHelp(c)
- return true
- }
-
- return false
-}
-
-func checkShellCompleteFlag(a *App, arguments []string) (bool, []string) {
- if !a.EnableBashCompletion {
- return false, arguments
- }
-
- pos := len(arguments) - 1
- lastArg := arguments[pos]
-
- if lastArg != "--generate-bash-completion" {
- return false, arguments
- }
-
- return true, arguments[:pos]
-}
-
-func checkCompletions(c *Context) bool {
- if !c.shellComplete {
- return false
- }
-
- if args := c.Args(); args.Present() {
- name := args.First()
- if cmd := c.App.Command(name); cmd != nil {
- // let the command handle the completion
- return false
- }
- }
-
- ShowCompletions(c)
- return true
-}
-
-func checkCommandCompletions(c *Context, name string) bool {
- if !c.shellComplete {
- return false
- }
-
- ShowCommandCompletions(c, name)
- return true
-}
-
-func indent(spaces int, v string) string {
- pad := strings.Repeat(" ", spaces)
- return pad + strings.Replace(v, "\n", "\n"+pad, -1)
-}
-
-func nindent(spaces int, v string) string {
- return "\n" + indent(spaces, v)
-}
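
As an aside (not part of the patch): the removed help.go renders its templates through text/template combined with tabwriter so command names and usage strings line up in columns. The standalone Go sketch below shows that same pattern; the `command` struct and the template text are invented for illustration only.

```go
// Standalone sketch of template + tabwriter help rendering, the pattern used
// by the removed printHelpCustom. Names here are illustrative, not cli's API.
package main

import (
	"os"
	"strings"
	"text/tabwriter"
	"text/template"
)

const tmpl = `COMMANDS:
{{range .}}   {{join .Names ", "}}{{"\t"}}{{.Usage}}
{{end}}`

type command struct {
	Names []string
	Usage string
}

func main() {
	cmds := []command{
		{Names: []string{"server", "s"}, Usage: "run the server"},
		{Names: []string{"admin"}, Usage: "administration tasks"},
	}

	// tabwriter aligns the columns produced by the {{"\t"}} separators.
	w := tabwriter.NewWriter(os.Stdout, 1, 8, 2, ' ', 0)
	t := template.Must(template.New("help").
		Funcs(template.FuncMap{"join": strings.Join}).
		Parse(tmpl))
	if err := t.Execute(w, cmds); err != nil {
		panic(err)
	}
	_ = w.Flush()
}
```
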
diff --git a/vendor/github.com/urfave/cli/v2/parse.go b/vendor/github.com/urfave/cli/v2/parse.go
deleted file mode 100644
index 7df17296a..000000000
--- a/vendor/github.com/urfave/cli/v2/parse.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package cli
-
-import (
- "flag"
- "strings"
-)
-
-type iterativeParser interface {
- newFlagSet() (*flag.FlagSet, error)
- useShortOptionHandling() bool
-}
-
-// To enable short-option handling (e.g., "-it" vs "-i -t") we have to
-// iteratively catch parsing errors. This way we achieve LR parsing without
-// transforming any arguments. Otherwise, there is no way we can discriminate
-// combined short options from common arguments that should be left untouched.
-// Pass `shellComplete` to continue parsing options on failure during shell
-// completion, when the user-supplied options may be incomplete.
-func parseIter(set *flag.FlagSet, ip iterativeParser, args []string, shellComplete bool) error {
- for {
- err := set.Parse(args)
- if !ip.useShortOptionHandling() || err == nil {
- if shellComplete {
- return nil
- }
- return err
- }
-
- errStr := err.Error()
- trimmed := strings.TrimPrefix(errStr, "flag provided but not defined: -")
- if errStr == trimmed {
- return err
- }
-
- // regenerate the initial args with the split short opts
- argsWereSplit := false
- for i, arg := range args {
- // skip args that are not part of the error message
- if name := strings.TrimLeft(arg, "-"); name != trimmed {
- continue
- }
-
- // if we can't split, the error was accurate
- shortOpts := splitShortOptions(set, arg)
- if len(shortOpts) == 1 {
- return err
- }
-
- // swap current argument with the split version
- args = append(args[:i], append(shortOpts, args[i+1:]...)...)
- argsWereSplit = true
- break
- }
-
- // This should be an impossible to reach code path, but in case the arg
- // splitting failed to happen, this will prevent infinite loops
- if !argsWereSplit {
- return err
- }
-
- // Since custom parsing failed, replace the flag set before retrying
- newSet, err := ip.newFlagSet()
- if err != nil {
- return err
- }
- *set = *newSet
- }
-}
-
-func splitShortOptions(set *flag.FlagSet, arg string) []string {
- shortFlagsExist := func(s string) bool {
- for _, c := range s[1:] {
- if f := set.Lookup(string(c)); f == nil {
- return false
- }
- }
- return true
- }
-
- if !isSplittable(arg) || !shortFlagsExist(arg) {
- return []string{arg}
- }
-
- separated := make([]string, 0, len(arg)-1)
- for _, flagChar := range arg[1:] {
- separated = append(separated, "-"+string(flagChar))
- }
-
- return separated
-}
-
-func isSplittable(flagArg string) bool {
- return strings.HasPrefix(flagArg, "-") && !strings.HasPrefix(flagArg, "--") && len(flagArg) > 2
-}
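
The removed parse.go implements short-option handling (accepting "-it" in place of "-i -t") by retrying the parse after splitting combined short options. The following standalone sketch is my illustration of the core splitting idea against the standard flag package; `splitShort` is an invented helper, not the library's code.

```go
// Sketch: expand a combined short option such as "-it" into "-i -t" before
// handing the arguments to flag.FlagSet.Parse.
package main

import (
	"flag"
	"fmt"
)

// splitShort expands "-ab" into ["-a", "-b"] when every single-letter flag is
// defined on the set; otherwise it returns the argument unchanged.
func splitShort(set *flag.FlagSet, arg string) []string {
	if len(arg) <= 2 || arg[0] != '-' || arg[1] == '-' {
		return []string{arg}
	}
	for _, c := range arg[1:] {
		if set.Lookup(string(c)) == nil {
			return []string{arg}
		}
	}
	out := make([]string, 0, len(arg)-1)
	for _, c := range arg[1:] {
		out = append(out, "-"+string(c))
	}
	return out
}

func main() {
	set := flag.NewFlagSet("demo", flag.ContinueOnError)
	i := set.Bool("i", false, "interactive")
	t := set.Bool("t", false, "tty")

	var args []string
	for _, a := range []string{"-it"} {
		args = append(args, splitShort(set, a)...)
	}
	_ = set.Parse(args)
	fmt.Println(*i, *t) // true true
}
```
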
diff --git a/vendor/github.com/urfave/cli/v2/sort.go b/vendor/github.com/urfave/cli/v2/sort.go
deleted file mode 100644
index 23d1c2f77..000000000
--- a/vendor/github.com/urfave/cli/v2/sort.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package cli
-
-import "unicode"
-
-// lexicographicLess compares strings alphabetically considering case.
-func lexicographicLess(i, j string) bool {
- iRunes := []rune(i)
- jRunes := []rune(j)
-
- lenShared := len(iRunes)
- if lenShared > len(jRunes) {
- lenShared = len(jRunes)
- }
-
- for index := 0; index < lenShared; index++ {
- ir := iRunes[index]
- jr := jRunes[index]
-
- if lir, ljr := unicode.ToLower(ir), unicode.ToLower(jr); lir != ljr {
- return lir < ljr
- }
-
- if ir != jr {
- return ir < jr
- }
- }
-
- return i < j
-}
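
The removed lexicographicLess orders flag and command names case-insensitively first and only falls back to byte order when the names differ solely by case. A rough standalone equivalent (an approximation for illustration, not the removed implementation) can be written with sort.Slice:

```go
// Sketch: case-insensitive-first ordering, so "Bbb" sorts between "aaa" and "ccc".
package main

import (
	"fmt"
	"sort"
	"strings"
)

func less(i, j string) bool {
	li, lj := strings.ToLower(i), strings.ToLower(j)
	if li != lj {
		return li < lj
	}
	return i < j // case-only difference: fall back to byte order
}

func main() {
	names := []string{"ccc", "Bbb", "aaa", "AAA"}
	sort.Slice(names, func(a, b int) bool { return less(names[a], names[b]) })
	fmt.Println(names) // [AAA aaa Bbb ccc]
}
```
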
diff --git a/vendor/github.com/urfave/cli/v2/template.go b/vendor/github.com/urfave/cli/v2/template.go
deleted file mode 100644
index 31c03f81c..000000000
--- a/vendor/github.com/urfave/cli/v2/template.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package cli
-
-// AppHelpTemplate is the text template for the Default help topic.
-// cli.go uses text/template to render templates. You can
-// render custom help text by setting this variable.
-var AppHelpTemplate = `NAME:
- {{.Name}}{{if .Usage}} - {{.Usage}}{{end}}
-
-USAGE:
- {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}
-
-VERSION:
- {{.Version}}{{end}}{{end}}{{if .Description}}
-
-DESCRIPTION:
- {{.Description | nindent 3 | trim}}{{end}}{{if len .Authors}}
-
-AUTHOR{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}:
- {{range $index, $author := .Authors}}{{if $index}}
- {{end}}{{$author}}{{end}}{{end}}{{if .VisibleCommands}}
-
-COMMANDS:{{range .VisibleCategories}}{{if .Name}}
- {{.Name}}:{{range .VisibleCommands}}
- {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{else}}{{range .VisibleCommands}}
- {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{end}}{{end}}{{end}}{{if .VisibleFlags}}
-
-GLOBAL OPTIONS:
- {{range $index, $option := .VisibleFlags}}{{if $index}}
- {{end}}{{$option}}{{end}}{{end}}{{if .Copyright}}
-
-COPYRIGHT:
- {{.Copyright}}{{end}}
-`
-
-// CommandHelpTemplate is the text template for the command help topic.
-// cli.go uses text/template to render templates. You can
-// render custom help text by setting this variable.
-var CommandHelpTemplate = `NAME:
- {{.HelpName}} - {{.Usage}}
-
-USAGE:
- {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}}
-
-CATEGORY:
- {{.Category}}{{end}}{{if .Description}}
-
-DESCRIPTION:
- {{.Description | nindent 3 | trim}}{{end}}{{if .VisibleFlags}}
-
-OPTIONS:
- {{range .VisibleFlags}}{{.}}
- {{end}}{{end}}
-`
-
-// SubcommandHelpTemplate is the text template for the subcommand help topic.
-// cli.go uses text/template to render templates. You can
-// render custom help text by setting this variable.
-var SubcommandHelpTemplate = `NAME:
- {{.HelpName}} - {{.Usage}}
-
-USAGE:
- {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Description}}
-
-DESCRIPTION:
- {{.Description | nindent 3 | trim}}{{end}}
-
-COMMANDS:{{range .VisibleCategories}}{{if .Name}}
- {{.Name}}:{{range .VisibleCommands}}
- {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{else}}{{range .VisibleCommands}}
- {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{end}}{{end}}{{if .VisibleFlags}}
-
-OPTIONS:
- {{range .VisibleFlags}}{{.}}
- {{end}}{{end}}
-`
-
-var MarkdownDocTemplate = `% {{ .App.Name }} 8
-
-# NAME
-
-{{ .App.Name }}{{ if .App.Usage }} - {{ .App.Usage }}{{ end }}
-
-# SYNOPSIS
-
-{{ .App.Name }}
-{{ if .SynopsisArgs }}
-` + "```" + `
-{{ range $v := .SynopsisArgs }}{{ $v }}{{ end }}` + "```" + `
-{{ end }}{{ if .App.UsageText }}
-# DESCRIPTION
-
-{{ .App.UsageText }}
-{{ end }}
-**Usage**:
-
-` + "```" + `
-{{ .App.Name }} [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...]
-` + "```" + `
-{{ if .GlobalArgs }}
-# GLOBAL OPTIONS
-{{ range $v := .GlobalArgs }}
-{{ $v }}{{ end }}
-{{ end }}{{ if .Commands }}
-# COMMANDS
-{{ range $v := .Commands }}
-{{ $v }}{{ end }}{{ end }}`
-
-var FishCompletionTemplate = `# {{ .App.Name }} fish shell completion
-
-function __fish_{{ .App.Name }}_no_subcommand --description 'Test if there has been any subcommand yet'
- for i in (commandline -opc)
- if contains -- $i{{ range $v := .AllCommands }} {{ $v }}{{ end }}
- return 1
- end
- end
- return 0
-end
-
-{{ range $v := .Completions }}{{ $v }}
-{{ end }}`
diff --git a/vendor/gopkg.in/ini.v1/.gitignore b/vendor/gopkg.in/ini.v1/.gitignore
new file mode 100644
index 000000000..12411127b
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/.gitignore
@@ -0,0 +1,6 @@
+testdata/conf_out.ini
+ini.sublime-project
+ini.sublime-workspace
+testdata/conf_reflect.ini
+.idea
+/.vscode
diff --git a/vendor/gopkg.in/ini.v1/.golangci.yml b/vendor/gopkg.in/ini.v1/.golangci.yml
new file mode 100644
index 000000000..b7256bae1
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/.golangci.yml
@@ -0,0 +1,21 @@
+linters-settings:
+ nakedret:
+ max-func-lines: 0 # Disallow any unnamed return statement
+
+linters:
+ enable:
+ - deadcode
+ - errcheck
+ - gosimple
+ - govet
+ - ineffassign
+ - staticcheck
+ - structcheck
+ - typecheck
+ - unused
+ - varcheck
+ - nakedret
+ - gofmt
+ - rowserrcheck
+ - unconvert
+ - goimports
diff --git a/vendor/gopkg.in/ini.v1/LICENSE b/vendor/gopkg.in/ini.v1/LICENSE
new file mode 100644
index 000000000..d361bbcdf
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright 2014 Unknwon
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/gopkg.in/ini.v1/Makefile b/vendor/gopkg.in/ini.v1/Makefile
new file mode 100644
index 000000000..f3b0dae2d
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/Makefile
@@ -0,0 +1,15 @@
+.PHONY: build test bench vet coverage
+
+build: vet bench
+
+test:
+ go test -v -cover -race
+
+bench:
+ go test -v -cover -test.bench=. -test.benchmem
+
+vet:
+ go vet
+
+coverage:
+ go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out
diff --git a/vendor/gopkg.in/ini.v1/README.md b/vendor/gopkg.in/ini.v1/README.md
new file mode 100644
index 000000000..5d65658b2
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/README.md
@@ -0,0 +1,43 @@
+# INI
+
+[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/go-ini/ini/Go?logo=github&style=for-the-badge)](https://github.com/go-ini/ini/actions?query=workflow%3AGo)
+[![codecov](https://img.shields.io/codecov/c/github/go-ini/ini/master?logo=codecov&style=for-the-badge)](https://codecov.io/gh/go-ini/ini)
+[![GoDoc](https://img.shields.io/badge/GoDoc-Reference-blue?style=for-the-badge&logo=go)](https://pkg.go.dev/github.com/go-ini/ini?tab=doc)
+[![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg?style=for-the-badge&logo=sourcegraph)](https://sourcegraph.com/github.com/go-ini/ini)
+
+![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
+
+Package ini provides INI file read and write functionality in Go.
+
+## Features
+
+- Load from multiple data sources (file, `[]byte`, `io.Reader` and `io.ReadCloser`) with overwrites.
+- Read with recursion values.
+- Read with parent-child sections.
+- Read with auto-increment key names.
+- Read with multiple-line values.
+- Read with tons of helper methods.
+- Read and convert values to Go types.
+- Read and **WRITE** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Keep sections and keys in order as you parse and save.
+
+## Installation
+
+The minimum requirement of Go is **1.6**.
+
+```sh
+$ go get gopkg.in/ini.v1
+```
+
+Please add `-u` flag to update in the future.
+
+## Getting Help
+
+- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started)
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+- 中国大陆镜像:https://ini.unknwon.cn
+
+## License
+
+This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
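
This vendored package appears to come in as a transitive dependency of the new Viper config tooling rather than being used directly; still, for orientation, here is a minimal hypothetical usage sketch of gopkg.in/ini.v1 (the section and key names are invented):

```go
// Minimal sketch: parse an in-memory INI document and read a couple of values.
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	data := []byte("app_mode = development\n\n[server]\nport = 8080\n")

	cfg, err := ini.Load(data) // also accepts file paths and io.Readers
	if err != nil {
		log.Fatalf("failed to parse INI: %v", err)
	}

	// Keys outside any [section] live under the empty section name.
	fmt.Println(cfg.Section("").Key("app_mode").String())      // development
	fmt.Println(cfg.Section("server").Key("port").MustInt(80)) // 8080
}
```
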
diff --git a/vendor/gopkg.in/ini.v1/codecov.yml b/vendor/gopkg.in/ini.v1/codecov.yml
new file mode 100644
index 000000000..31f646ee0
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/codecov.yml
@@ -0,0 +1,9 @@
+coverage:
+ range: "60...95"
+ status:
+ project:
+ default:
+ threshold: 1%
+
+comment:
+ layout: 'diff'
diff --git a/vendor/gopkg.in/ini.v1/data_source.go b/vendor/gopkg.in/ini.v1/data_source.go
new file mode 100644
index 000000000..c3a541f1d
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/data_source.go
@@ -0,0 +1,76 @@
+// Copyright 2019 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+)
+
+var (
+ _ dataSource = (*sourceFile)(nil)
+ _ dataSource = (*sourceData)(nil)
+ _ dataSource = (*sourceReadCloser)(nil)
+)
+
+// dataSource is an interface that returns object which can be read and closed.
+type dataSource interface {
+ ReadCloser() (io.ReadCloser, error)
+}
+
+// sourceFile represents an object that contains content on the local file system.
+type sourceFile struct {
+ name string
+}
+
+func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
+ return os.Open(s.name)
+}
+
+// sourceData represents an object that contains content in memory.
+type sourceData struct {
+ data []byte
+}
+
+func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
+ return ioutil.NopCloser(bytes.NewReader(s.data)), nil
+}
+
+// sourceReadCloser represents an input stream with Close method.
+type sourceReadCloser struct {
+ reader io.ReadCloser
+}
+
+func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
+ return s.reader, nil
+}
+
+func parseDataSource(source interface{}) (dataSource, error) {
+ switch s := source.(type) {
+ case string:
+ return sourceFile{s}, nil
+ case []byte:
+ return &sourceData{s}, nil
+ case io.ReadCloser:
+ return &sourceReadCloser{s}, nil
+ case io.Reader:
+ return &sourceReadCloser{ioutil.NopCloser(s)}, nil
+ default:
+ return nil, fmt.Errorf("error parsing data source: unknown type %q", s)
+ }
+}
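
parseDataSource above is what lets ini.Load accept a file path, raw bytes, or a reader interchangeably; sources are merged in order, with later ones overwriting earlier values. A small hypothetical sketch (section and key names invented):

```go
// Sketch: load a base config from bytes, then override it from an io.Reader.
package main

import (
	"fmt"
	"log"
	"strings"

	"gopkg.in/ini.v1"
)

func main() {
	base := []byte("[server]\nport = 8080\n")
	override := strings.NewReader("[server]\nport = 9090\n")

	cfg, err := ini.Load(base, override) // []byte first, io.Reader second
	if err != nil {
		log.Fatalf("load: %v", err)
	}

	fmt.Println(cfg.Section("server").Key("port").String()) // 9090
}
```
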
diff --git a/vendor/gopkg.in/ini.v1/deprecated.go b/vendor/gopkg.in/ini.v1/deprecated.go
new file mode 100644
index 000000000..e8bda06e6
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/deprecated.go
@@ -0,0 +1,25 @@
+// Copyright 2019 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+const (
+ // Deprecated: Use "DefaultSection" instead.
+ DEFAULT_SECTION = DefaultSection
+)
+
+var (
+ // Deprecated: AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
+ AllCapsUnderscore = SnackCase
+)
diff --git a/vendor/gopkg.in/ini.v1/error.go b/vendor/gopkg.in/ini.v1/error.go
new file mode 100644
index 000000000..d88347c54
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/error.go
@@ -0,0 +1,34 @@
+// Copyright 2016 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "fmt"
+)
+
+// ErrDelimiterNotFound indicates that no key-value delimiter was found on a line where one is required.
+type ErrDelimiterNotFound struct {
+ Line string
+}
+
+// IsErrDelimiterNotFound returns true if the given error is an instance of ErrDelimiterNotFound.
+func IsErrDelimiterNotFound(err error) bool {
+ _, ok := err.(ErrDelimiterNotFound)
+ return ok
+}
+
+func (err ErrDelimiterNotFound) Error() string {
+ return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
+}
diff --git a/vendor/gopkg.in/ini.v1/file.go b/vendor/gopkg.in/ini.v1/file.go
new file mode 100644
index 000000000..b96d172cf
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/file.go
@@ -0,0 +1,517 @@
+// Copyright 2017 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+ "sync"
+)
+
+// File represents a combination of one or more INI files in memory.
+type File struct {
+ options LoadOptions
+ dataSources []dataSource
+
+ // Should make things safe, but sometimes doesn't matter.
+ BlockMode bool
+ lock sync.RWMutex
+
+ // To keep data in order.
+ sectionList []string
+ // To keep track of the index of a section with same name.
+ // This meta list is only used with non-unique section names are allowed.
+ sectionIndexes []int
+
+ // Actual data is stored here.
+ sections map[string][]*Section
+
+ NameMapper
+ ValueMapper
+}
+
+// newFile initializes File object with given data sources.
+func newFile(dataSources []dataSource, opts LoadOptions) *File {
+ if len(opts.KeyValueDelimiters) == 0 {
+ opts.KeyValueDelimiters = "=:"
+ }
+ if len(opts.KeyValueDelimiterOnWrite) == 0 {
+ opts.KeyValueDelimiterOnWrite = "="
+ }
+ if len(opts.ChildSectionDelimiter) == 0 {
+ opts.ChildSectionDelimiter = "."
+ }
+
+ return &File{
+ BlockMode: true,
+ dataSources: dataSources,
+ sections: make(map[string][]*Section),
+ options: opts,
+ }
+}
+
+// Empty returns an empty file object.
+func Empty(opts ...LoadOptions) *File {
+ var opt LoadOptions
+ if len(opts) > 0 {
+ opt = opts[0]
+ }
+
+ // Ignore error here, we are sure our data is good.
+ f, _ := LoadSources(opt, []byte(""))
+ return f
+}
+
+// NewSection creates a new section.
+func (f *File) NewSection(name string) (*Section, error) {
+ if len(name) == 0 {
+ return nil, errors.New("empty section name")
+ }
+
+ if (f.options.Insensitive || f.options.InsensitiveSections) && name != DefaultSection {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ if !f.options.AllowNonUniqueSections && inSlice(name, f.sectionList) {
+ return f.sections[name][0], nil
+ }
+
+ f.sectionList = append(f.sectionList, name)
+
+ // NOTE: Append to indexes must happen before appending to sections,
+ // otherwise index will have off-by-one problem.
+ f.sectionIndexes = append(f.sectionIndexes, len(f.sections[name]))
+
+ sec := newSection(f, name)
+ f.sections[name] = append(f.sections[name], sec)
+
+ return sec, nil
+}
+
+// NewRawSection creates a new section with an unparseable body.
+func (f *File) NewRawSection(name, body string) (*Section, error) {
+ section, err := f.NewSection(name)
+ if err != nil {
+ return nil, err
+ }
+
+ section.isRawSection = true
+ section.rawBody = body
+ return section, nil
+}
+
+// NewSections creates a list of sections.
+func (f *File) NewSections(names ...string) (err error) {
+ for _, name := range names {
+ if _, err = f.NewSection(name); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetSection returns section by given name.
+func (f *File) GetSection(name string) (*Section, error) {
+ secs, err := f.SectionsByName(name)
+ if err != nil {
+ return nil, err
+ }
+
+ return secs[0], err
+}
+
+// SectionsByName returns all sections with given name.
+func (f *File) SectionsByName(name string) ([]*Section, error) {
+ if len(name) == 0 {
+ name = DefaultSection
+ }
+ if f.options.Insensitive || f.options.InsensitiveSections {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ }
+
+ secs := f.sections[name]
+ if len(secs) == 0 {
+ return nil, fmt.Errorf("section %q does not exist", name)
+ }
+
+ return secs, nil
+}
+
+// Section assumes named section exists and returns a zero-value when not.
+func (f *File) Section(name string) *Section {
+ sec, err := f.GetSection(name)
+ if err != nil {
+ // Note: It's OK here because the only possible error is empty section name,
+ // but if it's empty, this piece of code won't be executed.
+ sec, _ = f.NewSection(name)
+ return sec
+ }
+ return sec
+}
+
+// SectionWithIndex assumes named section exists and returns a new section when not.
+func (f *File) SectionWithIndex(name string, index int) *Section {
+ secs, err := f.SectionsByName(name)
+ if err != nil || len(secs) <= index {
+ // NOTE: It's OK here because the only possible error is empty section name,
+ // but if it's empty, this piece of code won't be executed.
+ newSec, _ := f.NewSection(name)
+ return newSec
+ }
+
+ return secs[index]
+}
+
+// Sections returns a list of Section stored in the current instance.
+func (f *File) Sections() []*Section {
+ if f.BlockMode {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ }
+
+ sections := make([]*Section, len(f.sectionList))
+ for i, name := range f.sectionList {
+ sections[i] = f.sections[name][f.sectionIndexes[i]]
+ }
+ return sections
+}
+
+// ChildSections returns a list of child sections of given section name.
+func (f *File) ChildSections(name string) []*Section {
+ return f.Section(name).ChildSections()
+}
+
+// SectionStrings returns list of section names.
+func (f *File) SectionStrings() []string {
+ list := make([]string, len(f.sectionList))
+ copy(list, f.sectionList)
+ return list
+}
+
+// DeleteSection deletes a section or all sections with given name.
+func (f *File) DeleteSection(name string) {
+ secs, err := f.SectionsByName(name)
+ if err != nil {
+ return
+ }
+
+ for i := 0; i < len(secs); i++ {
+ // For non-unique sections, it is always needed to remove the first one so
+ // in the next iteration, the subsequent section continue having index 0.
+ // Ignoring the error as index 0 never returns an error.
+ _ = f.DeleteSectionWithIndex(name, 0)
+ }
+}
+
+// DeleteSectionWithIndex deletes a section with given name and index.
+func (f *File) DeleteSectionWithIndex(name string, index int) error {
+ if !f.options.AllowNonUniqueSections && index != 0 {
+ return fmt.Errorf("delete section with non-zero index is only allowed when non-unique sections is enabled")
+ }
+
+ if len(name) == 0 {
+ name = DefaultSection
+ }
+ if f.options.Insensitive || f.options.InsensitiveSections {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ // Count occurrences of the sections
+ occurrences := 0
+
+ sectionListCopy := make([]string, len(f.sectionList))
+ copy(sectionListCopy, f.sectionList)
+
+ for i, s := range sectionListCopy {
+ if s != name {
+ continue
+ }
+
+ if occurrences == index {
+ if len(f.sections[name]) <= 1 {
+ delete(f.sections, name) // The last one in the map
+ } else {
+ f.sections[name] = append(f.sections[name][:index], f.sections[name][index+1:]...)
+ }
+
+ // Fix section lists
+ f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
+ f.sectionIndexes = append(f.sectionIndexes[:i], f.sectionIndexes[i+1:]...)
+
+ } else if occurrences > index {
+ // Fix the indices of all following sections with this name.
+ f.sectionIndexes[i-1]--
+ }
+
+ occurrences++
+ }
+
+ return nil
+}
+
+func (f *File) reload(s dataSource) error {
+ r, err := s.ReadCloser()
+ if err != nil {
+ return err
+ }
+ defer r.Close()
+
+ return f.parse(r)
+}
+
+// Reload reloads and parses all data sources.
+func (f *File) Reload() (err error) {
+ for _, s := range f.dataSources {
+ if err = f.reload(s); err != nil {
+ // In loose mode, we create an empty default section for nonexistent files.
+ if os.IsNotExist(err) && f.options.Loose {
+ _ = f.parse(bytes.NewBuffer(nil))
+ continue
+ }
+ return err
+ }
+ if f.options.ShortCircuit {
+ return nil
+ }
+ }
+ return nil
+}
+
+// Append appends one or more data sources and reloads automatically.
+func (f *File) Append(source interface{}, others ...interface{}) error {
+ ds, err := parseDataSource(source)
+ if err != nil {
+ return err
+ }
+ f.dataSources = append(f.dataSources, ds)
+ for _, s := range others {
+ ds, err = parseDataSource(s)
+ if err != nil {
+ return err
+ }
+ f.dataSources = append(f.dataSources, ds)
+ }
+ return f.Reload()
+}
+
+func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
+ equalSign := DefaultFormatLeft + f.options.KeyValueDelimiterOnWrite + DefaultFormatRight
+
+ if PrettyFormat || PrettyEqual {
+ equalSign = fmt.Sprintf(" %s ", f.options.KeyValueDelimiterOnWrite)
+ }
+
+ // Use buffer to make sure target is safe until finish encoding.
+ buf := bytes.NewBuffer(nil)
+ for i, sname := range f.sectionList {
+ sec := f.SectionWithIndex(sname, f.sectionIndexes[i])
+ if len(sec.Comment) > 0 {
+ // Support multiline comments
+ lines := strings.Split(sec.Comment, LineBreak)
+ for i := range lines {
+ if lines[i][0] != '#' && lines[i][0] != ';' {
+ lines[i] = "; " + lines[i]
+ } else {
+ lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
+ }
+
+ if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if i > 0 || DefaultHeader || (i == 0 && strings.ToUpper(sec.name) != DefaultSection) {
+ if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
+ return nil, err
+ }
+ } else {
+ // Write nothing if default section is empty
+ if len(sec.keyList) == 0 {
+ continue
+ }
+ }
+
+ if sec.isRawSection {
+ if _, err := buf.WriteString(sec.rawBody); err != nil {
+ return nil, err
+ }
+
+ if PrettySection {
+ // Put a line between sections
+ if _, err := buf.WriteString(LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ continue
+ }
+
+ // Count and generate alignment length and buffer spaces using the
+ // longest key. Keys may be modified if they contain certain characters so
+ // we need to take that into account in our calculation.
+ alignLength := 0
+ if PrettyFormat {
+ for _, kname := range sec.keyList {
+ keyLength := len(kname)
+ // First case will surround key by ` and second by """
+ if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) {
+ keyLength += 2
+ } else if strings.Contains(kname, "`") {
+ keyLength += 6
+ }
+
+ if keyLength > alignLength {
+ alignLength = keyLength
+ }
+ }
+ }
+ alignSpaces := bytes.Repeat([]byte(" "), alignLength)
+
+ KeyList:
+ for _, kname := range sec.keyList {
+ key := sec.Key(kname)
+ if len(key.Comment) > 0 {
+ if len(indent) > 0 && sname != DefaultSection {
+ buf.WriteString(indent)
+ }
+
+ // Support multiline comments
+ lines := strings.Split(key.Comment, LineBreak)
+ for i := range lines {
+ if lines[i][0] != '#' && lines[i][0] != ';' {
+ lines[i] = "; " + strings.TrimSpace(lines[i])
+ } else {
+ lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
+ }
+
+ if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if len(indent) > 0 && sname != DefaultSection {
+ buf.WriteString(indent)
+ }
+
+ switch {
+ case key.isAutoIncrement:
+ kname = "-"
+ case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters):
+ kname = "`" + kname + "`"
+ case strings.Contains(kname, "`"):
+ kname = `"""` + kname + `"""`
+ }
+
+ for _, val := range key.ValueWithShadows() {
+ if _, err := buf.WriteString(kname); err != nil {
+ return nil, err
+ }
+
+ if key.isBooleanType {
+ if kname != sec.keyList[len(sec.keyList)-1] {
+ buf.WriteString(LineBreak)
+ }
+ continue KeyList
+ }
+
+ // Write out alignment spaces before "=" sign
+ if PrettyFormat {
+ buf.Write(alignSpaces[:alignLength-len(kname)])
+ }
+
+ // In case key value contains "\n", "`", "\"", "#" or ";"
+ if strings.ContainsAny(val, "\n`") {
+ val = `"""` + val + `"""`
+ } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
+ val = "`" + val + "`"
+ } else if len(strings.TrimSpace(val)) != len(val) {
+ val = `"` + val + `"`
+ }
+ if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+
+ for _, val := range key.nestedValues {
+ if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if PrettySection {
+ // Put a line between sections
+ if _, err := buf.WriteString(LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+// WriteToIndent writes content into io.Writer with given indention.
+// If PrettyFormat has been set to be true,
+// it will align "=" sign with spaces under each section.
+func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) {
+ buf, err := f.writeToBuffer(indent)
+ if err != nil {
+ return 0, err
+ }
+ return buf.WriteTo(w)
+}
+
+// WriteTo writes file content into io.Writer.
+func (f *File) WriteTo(w io.Writer) (int64, error) {
+ return f.WriteToIndent(w, "")
+}
+
+// SaveToIndent writes content to file system with given value indention.
+func (f *File) SaveToIndent(filename, indent string) error {
+ // Note: Because we are truncating with os.Create,
+ // so it's safer to save to a temporary file location and rename after done.
+ buf, err := f.writeToBuffer(indent)
+ if err != nil {
+ return err
+ }
+
+ return ioutil.WriteFile(filename, buf.Bytes(), 0666)
+}
+
+// SaveTo writes content to file system.
+func (f *File) SaveTo(filename string) error {
+ return f.SaveToIndent(filename, "")
+}
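
file.go provides the package's in-memory model: Empty and NewSection build up a File, and WriteTo/SaveTo serialize it with the pretty-formatting rules implemented in writeToBuffer. A brief hypothetical sketch of that flow (Section.NewKey comes from the package's section/key files, which are outside this hunk):

```go
// Sketch: build an INI document in memory, then write it to stdout.
package main

import (
	"log"
	"os"

	"gopkg.in/ini.v1"
)

func main() {
	cfg := ini.Empty()

	sec, err := cfg.NewSection("server")
	if err != nil {
		log.Fatalf("new section: %v", err)
	}
	if _, err := sec.NewKey("port", "8080"); err != nil {
		log.Fatalf("new key: %v", err)
	}

	// WriteTo aligns "=" signs when PrettyFormat is enabled (the default).
	if _, err := cfg.WriteTo(os.Stdout); err != nil {
		log.Fatalf("write: %v", err)
	}
}
```
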
diff --git a/vendor/gopkg.in/ini.v1/helper.go b/vendor/gopkg.in/ini.v1/helper.go
new file mode 100644
index 000000000..f9d80a682
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/helper.go
@@ -0,0 +1,24 @@
+// Copyright 2019 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+func inSlice(str string, s []string) bool {
+ for _, v := range s {
+ if str == v {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/gopkg.in/ini.v1/ini.go b/vendor/gopkg.in/ini.v1/ini.go
new file mode 100644
index 000000000..ac2a93a5b
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/ini.go
@@ -0,0 +1,176 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package ini provides INI file read and write functionality in Go.
+package ini
+
+import (
+ "os"
+ "regexp"
+ "runtime"
+ "strings"
+)
+
+const (
+ // DefaultSection is the name of default section. You can use this constant or the string literal.
+ // In most of cases, an empty string is all you need to access the section.
+ DefaultSection = "DEFAULT"
+
+ // Maximum allowed depth when recursively substituing variable names.
+ depthValues = 99
+)
+
+var (
+ // LineBreak is the delimiter to determine or compose a new line.
+ // This variable will be changed to "\r\n" automatically on Windows at package init time.
+ LineBreak = "\n"
+
+ // Variable regexp pattern: %(variable)s
+ varPattern = regexp.MustCompile(`%\(([^)]+)\)s`)
+
+ // DefaultHeader explicitly writes default section header.
+ DefaultHeader = false
+
+ // PrettySection indicates whether to put a line between sections.
+ PrettySection = true
+ // PrettyFormat indicates whether to align "=" sign with spaces to produce pretty output
+ // or reduce all possible spaces for compact format.
+ PrettyFormat = true
+ // PrettyEqual places spaces around "=" sign even when PrettyFormat is false.
+ PrettyEqual = false
+ // DefaultFormatLeft places custom spaces on the left when PrettyFormat and PrettyEqual are both disabled.
+ DefaultFormatLeft = ""
+ // DefaultFormatRight places custom spaces on the right when PrettyFormat and PrettyEqual are both disabled.
+ DefaultFormatRight = ""
+)
+
+var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test")
+
+func init() {
+ if runtime.GOOS == "windows" && !inTest {
+ LineBreak = "\r\n"
+ }
+}
+
+// LoadOptions contains all customized options used for load data source(s).
+type LoadOptions struct {
+ // Loose indicates whether the parser should ignore nonexistent files or return error.
+ Loose bool
+ // Insensitive indicates whether the parser forces all section and key names to lowercase.
+ Insensitive bool
+ // InsensitiveSections indicates whether the parser forces all section names to lowercase.
+ InsensitiveSections bool
+ // InsensitiveKeys indicates whether the parser forces all key names to lowercase.
+ InsensitiveKeys bool
+ // IgnoreContinuation indicates whether to ignore continuation lines while parsing.
+ IgnoreContinuation bool
+ // IgnoreInlineComment indicates whether to ignore comments at the end of a value and treat them as part of the value.
+ IgnoreInlineComment bool
+ // SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs.
+ SkipUnrecognizableLines bool
+ // ShortCircuit indicates whether to ignore other configuration sources once the first available configuration source has been loaded.
+ ShortCircuit bool
+ // AllowBooleanKeys indicates whether to allow boolean-type keys or to treat them as keys with a missing value.
+ // Keys of this type are mostly used in my.cnf.
+ AllowBooleanKeys bool
+ // AllowShadows indicates whether to keep track of keys with the same name under the same section.
+ AllowShadows bool
+ // AllowNestedValues indicates whether to allow AWS-like nested values.
+ // Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values
+ AllowNestedValues bool
+ // AllowPythonMultilineValues indicates whether to allow Python-like multi-line values.
+ // Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure
+ // Relevant quote: Values can also span multiple lines, as long as they are indented deeper
+ // than the first line of the value.
+ AllowPythonMultilineValues bool
+ // SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value.
+ // Docs: https://docs.python.org/2/library/configparser.html
+ // Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names.
+ // In the latter case, they need to be preceded by a whitespace character to be recognized as a comment.
+ SpaceBeforeInlineComment bool
+ // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format
+ // when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value"
+ UnescapeValueDoubleQuotes bool
+ // UnescapeValueCommentSymbols indicates whether to unescape comment symbols (\# and \;) inside value to regular format
+ // when value is NOT surrounded by any quotes.
+ // Note: UNSTABLE, behavior might change to only unescape inside double quotes but may not be necessary at all.
+ UnescapeValueCommentSymbols bool
+ // UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise
+ // conform to key/value pairs. Specify the names of those blocks here.
+ UnparseableSections []string
+ // KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:".
+ KeyValueDelimiters string
+ // KeyValueDelimiterOnWrite is the delimiter that is used to separate key and value on output. By default, it is "=".
+ KeyValueDelimiterOnWrite string
+ // ChildSectionDelimiter is the delimiter that is used to separate child sections. By default, it is ".".
+ ChildSectionDelimiter string
+ // PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes).
+ PreserveSurroundedQuote bool
+ // DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values).
+ DebugFunc DebugFunc
+ // ReaderBufferSize is the buffer size of the reader in bytes.
+ ReaderBufferSize int
+ // AllowNonUniqueSections indicates whether to allow sections with the same name multiple times.
+ AllowNonUniqueSections bool
+ // AllowDuplicateShadowValues indicates whether duplicate values of shadowed keys should be kept rather than deduplicated.
+ AllowDuplicateShadowValues bool
+}
+
+// DebugFunc is the type of function called to log parse events.
+type DebugFunc func(message string)
+
+// LoadSources allows caller to apply customized options for loading from data source(s).
+func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
+ sources := make([]dataSource, len(others)+1)
+ sources[0], err = parseDataSource(source)
+ if err != nil {
+ return nil, err
+ }
+ for i := range others {
+ sources[i+1], err = parseDataSource(others[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+ f := newFile(sources, opts)
+ if err = f.Reload(); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// Load loads and parses from INI data sources.
+// Arguments can be mixed of file name with string type, or raw data in []byte.
+// It will return error if list contains nonexistent files.
+func Load(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{}, source, others...)
+}
+
+// LooseLoad has exactly the same functionality as the Load function
+// except that it ignores nonexistent files instead of returning an error.
+func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{Loose: true}, source, others...)
+}
+
+// InsensitiveLoad has exactly the same functionality as the Load function
+// except that it forces all section and key names to be lowercased.
+func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{Insensitive: true}, source, others...)
+}
+
+// ShadowLoad has exactly the same functionality as the Load function
+// except that it allows shadow keys.
+func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
+}
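
A short sketch of the Load entry points defined above, assuming a hypothetical "app.ini" file with keys "mode" and "peer"; Load errors on missing files, LooseLoad does not, and ShadowLoad keeps repeated keys as shadows:

	package main

	import (
		"fmt"
		"log"

		"gopkg.in/ini.v1"
	)

	func main() {
		// Load returns an error if app.ini does not exist.
		cfg, err := ini.Load("app.ini")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(cfg.Section("").Key("mode").String())

		// LooseLoad tolerates missing files; ShadowLoad keeps duplicate keys.
		_, _ = ini.LooseLoad("missing.ini", "app.ini")
		if shadowed, err := ini.ShadowLoad("app.ini"); err == nil {
			fmt.Println(shadowed.Section("").Key("peer").ValueWithShadows())
		}

		// LoadSources exposes the full LoadOptions struct shown earlier.
		_, _ = ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "app.ini")
	}
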
diff --git a/vendor/gopkg.in/ini.v1/key.go b/vendor/gopkg.in/ini.v1/key.go
new file mode 100644
index 000000000..0302c291b
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/key.go
@@ -0,0 +1,828 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Key represents a key under a section.
+type Key struct {
+ s *Section
+ Comment string
+ name string
+ value string
+ isAutoIncrement bool
+ isBooleanType bool
+
+ isShadow bool
+ shadows []*Key
+
+ nestedValues []string
+}
+
+// newKey simply returns a key object with the given values.
+func newKey(s *Section, name, val string) *Key {
+ return &Key{
+ s: s,
+ name: name,
+ value: val,
+ }
+}
+
+func (k *Key) addShadow(val string) error {
+ if k.isShadow {
+ return errors.New("cannot add shadow to another shadow key")
+ } else if k.isAutoIncrement || k.isBooleanType {
+ return errors.New("cannot add shadow to auto-increment or boolean key")
+ }
+
+ if !k.s.f.options.AllowDuplicateShadowValues {
+ // Deduplicate shadows based on their values.
+ if k.value == val {
+ return nil
+ }
+ for i := range k.shadows {
+ if k.shadows[i].value == val {
+ return nil
+ }
+ }
+ }
+
+ shadow := newKey(k.s, k.name, val)
+ shadow.isShadow = true
+ k.shadows = append(k.shadows, shadow)
+ return nil
+}
+
+// AddShadow adds a new shadow key to itself.
+func (k *Key) AddShadow(val string) error {
+ if !k.s.f.options.AllowShadows {
+ return errors.New("shadow key is not allowed")
+ }
+ return k.addShadow(val)
+}
+
+func (k *Key) addNestedValue(val string) error {
+ if k.isAutoIncrement || k.isBooleanType {
+ return errors.New("cannot add nested value to auto-increment or boolean key")
+ }
+
+ k.nestedValues = append(k.nestedValues, val)
+ return nil
+}
+
+// AddNestedValue adds a nested value to the key.
+func (k *Key) AddNestedValue(val string) error {
+ if !k.s.f.options.AllowNestedValues {
+ return errors.New("nested value is not allowed")
+ }
+ return k.addNestedValue(val)
+}
+
+// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
+type ValueMapper func(string) string
+
+// Name returns name of key.
+func (k *Key) Name() string {
+ return k.name
+}
+
+// Value returns the raw value of the key for performance purposes.
+func (k *Key) Value() string {
+ return k.value
+}
+
+// ValueWithShadows returns raw values of key and its shadows if any.
+func (k *Key) ValueWithShadows() []string {
+ if len(k.shadows) == 0 {
+ return []string{k.value}
+ }
+ vals := make([]string, len(k.shadows)+1)
+ vals[0] = k.value
+ for i := range k.shadows {
+ vals[i+1] = k.shadows[i].value
+ }
+ return vals
+}
+
+// NestedValues returns nested values stored in the key.
+// The returned value may be nil if no nested values are stored in the key.
+func (k *Key) NestedValues() []string {
+ return k.nestedValues
+}
+
+// transformValue takes a raw value and transforms to its final string.
+func (k *Key) transformValue(val string) string {
+ if k.s.f.ValueMapper != nil {
+ val = k.s.f.ValueMapper(val)
+ }
+
+ // Fail fast if no indicator character is found for a recursive value
+ if !strings.Contains(val, "%") {
+ return val
+ }
+ for i := 0; i < depthValues; i++ {
+ vr := varPattern.FindString(val)
+ if len(vr) == 0 {
+ break
+ }
+
+ // Take off leading '%(' and trailing ')s'.
+ noption := vr[2 : len(vr)-2]
+
+ // Search in the same section.
+ // If not found or found the key itself, then search again in default section.
+ nk, err := k.s.GetKey(noption)
+ if err != nil || k == nk {
+ nk, _ = k.s.f.Section("").GetKey(noption)
+ if nk == nil {
+ // Stop when no results found in the default section,
+ // and returns the value as-is.
+ break
+ }
+ }
+
+ // Substitute by new value and take off leading '%(' and trailing ')s'.
+ val = strings.Replace(val, vr, nk.value, -1)
+ }
+ return val
+}
+
+// String returns string representation of value.
+func (k *Key) String() string {
+ return k.transformValue(k.value)
+}
+
+// Validate accepts a validation function which can
+// return a modified result as the key value.
+func (k *Key) Validate(fn func(string) string) string {
+ return fn(k.String())
+}
+
+// parseBool returns the boolean value represented by the string.
+//
+// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
+// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
+// Any other value returns an error.
+func parseBool(str string) (value bool, err error) {
+ switch str {
+ case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
+ return true, nil
+ case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
+ return false, nil
+ }
+ return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
+}
+
+// Bool returns bool type value.
+func (k *Key) Bool() (bool, error) {
+ return parseBool(k.String())
+}
+
+// Float64 returns float64 type value.
+func (k *Key) Float64() (float64, error) {
+ return strconv.ParseFloat(k.String(), 64)
+}
+
+// Int returns int type value.
+func (k *Key) Int() (int, error) {
+ v, err := strconv.ParseInt(k.String(), 0, 64)
+ return int(v), err
+}
+
+// Int64 returns int64 type value.
+func (k *Key) Int64() (int64, error) {
+ return strconv.ParseInt(k.String(), 0, 64)
+}
+
+// Uint returns uint type value.
+func (k *Key) Uint() (uint, error) {
+ u, e := strconv.ParseUint(k.String(), 0, 64)
+ return uint(u), e
+}
+
+// Uint64 returns uint64 type value.
+func (k *Key) Uint64() (uint64, error) {
+ return strconv.ParseUint(k.String(), 0, 64)
+}
+
+// Duration returns time.Duration type value.
+func (k *Key) Duration() (time.Duration, error) {
+ return time.ParseDuration(k.String())
+}
+
+// TimeFormat parses with given format and returns time.Time type value.
+func (k *Key) TimeFormat(format string) (time.Time, error) {
+ return time.Parse(format, k.String())
+}
+
+// Time parses with RFC3339 format and returns time.Time type value.
+func (k *Key) Time() (time.Time, error) {
+ return k.TimeFormat(time.RFC3339)
+}
+
+// MustString returns default value if key value is empty.
+func (k *Key) MustString(defaultVal string) string {
+ val := k.String()
+ if len(val) == 0 {
+ k.value = defaultVal
+ return defaultVal
+ }
+ return val
+}
+
+// MustBool always returns value without error,
+// it returns false if error occurs.
+func (k *Key) MustBool(defaultVal ...bool) bool {
+ val, err := k.Bool()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatBool(defaultVal[0])
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustFloat64 always returns value without error,
+// it returns 0.0 if error occurs.
+func (k *Key) MustFloat64(defaultVal ...float64) float64 {
+ val, err := k.Float64()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustInt always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt(defaultVal ...int) int {
+ val, err := k.Int()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatInt(int64(defaultVal[0]), 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustInt64 always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt64(defaultVal ...int64) int64 {
+ val, err := k.Int64()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatInt(defaultVal[0], 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustUint always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustUint(defaultVal ...uint) uint {
+ val, err := k.Uint()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatUint(uint64(defaultVal[0]), 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustUint64 always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
+ val, err := k.Uint64()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatUint(defaultVal[0], 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustDuration always returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
+ val, err := k.Duration()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = defaultVal[0].String()
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustTimeFormat always parses with given format and returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
+ val, err := k.TimeFormat(format)
+ if len(defaultVal) > 0 && err != nil {
+ k.value = defaultVal[0].Format(format)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustTime always parses with RFC3339 format and returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
+ return k.MustTimeFormat(time.RFC3339, defaultVal...)
+}
+
+// In always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) In(defaultVal string, candidates []string) string {
+ val := k.String()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InFloat64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
+ val := k.MustFloat64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InInt always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InInt(defaultVal int, candidates []int) int {
+ val := k.MustInt()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InInt64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
+ val := k.MustInt64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InUint always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
+ val := k.MustUint()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InUint64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
+ val := k.MustUint64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InTimeFormat always parses with given format and returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
+ val := k.MustTimeFormat(format)
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InTime always parses with RFC3339 format and returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
+ return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
+}
+
+// RangeFloat64 checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
+ val := k.MustFloat64()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeInt checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeInt(defaultVal, min, max int) int {
+ val := k.MustInt()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeInt64 checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
+ val := k.MustInt64()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeTimeFormat checks if value with given format is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
+ val := k.MustTimeFormat(format)
+ if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeTime checks if value with RFC3339 format is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
+ return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
+}
+
+// Strings returns list of string divided by given delimiter.
+func (k *Key) Strings(delim string) []string {
+ str := k.String()
+ if len(str) == 0 {
+ return []string{}
+ }
+
+ runes := []rune(str)
+ vals := make([]string, 0, 2)
+ var buf bytes.Buffer
+ escape := false
+ idx := 0
+ for {
+ if escape {
+ escape = false
+ if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) {
+ buf.WriteRune('\\')
+ }
+ buf.WriteRune(runes[idx])
+ } else {
+ if runes[idx] == '\\' {
+ escape = true
+ } else if strings.HasPrefix(string(runes[idx:]), delim) {
+ idx += len(delim) - 1
+ vals = append(vals, strings.TrimSpace(buf.String()))
+ buf.Reset()
+ } else {
+ buf.WriteRune(runes[idx])
+ }
+ }
+ idx++
+ if idx == len(runes) {
+ break
+ }
+ }
+
+ if buf.Len() > 0 {
+ vals = append(vals, strings.TrimSpace(buf.String()))
+ }
+
+ return vals
+}
+
+// StringsWithShadows returns list of string divided by given delimiter.
+// Shadows will also be appended if any.
+func (k *Key) StringsWithShadows(delim string) []string {
+ vals := k.ValueWithShadows()
+ results := make([]string, 0, len(vals)*2)
+ for i := range vals {
+ if len(vals) == 0 {
+ continue
+ }
+
+ results = append(results, strings.Split(vals[i], delim)...)
+ }
+
+ for i := range results {
+ results[i] = k.transformValue(strings.TrimSpace(results[i]))
+ }
+ return results
+}
+
+// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Float64s(delim string) []float64 {
+ vals, _ := k.parseFloat64s(k.Strings(delim), true, false)
+ return vals
+}
+
+// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Ints(delim string) []int {
+ vals, _ := k.parseInts(k.Strings(delim), true, false)
+ return vals
+}
+
+// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Int64s(delim string) []int64 {
+ vals, _ := k.parseInt64s(k.Strings(delim), true, false)
+ return vals
+}
+
+// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uints(delim string) []uint {
+ vals, _ := k.parseUints(k.Strings(delim), true, false)
+ return vals
+}
+
+// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uint64s(delim string) []uint64 {
+ vals, _ := k.parseUint64s(k.Strings(delim), true, false)
+ return vals
+}
+
+// Bools returns list of bool divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Bools(delim string) []bool {
+ vals, _ := k.parseBools(k.Strings(delim), true, false)
+ return vals
+}
+
+// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) TimesFormat(format, delim string) []time.Time {
+ vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false)
+ return vals
+}
+
+// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) Times(delim string) []time.Time {
+ return k.TimesFormat(time.RFC3339, delim)
+}
+
+// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then
+// it will not be included to result list.
+func (k *Key) ValidFloat64s(delim string) []float64 {
+ vals, _ := k.parseFloat64s(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will
+// not be included to result list.
+func (k *Key) ValidInts(delim string) []int {
+ vals, _ := k.parseInts(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer,
+// then it will not be included to result list.
+func (k *Key) ValidInt64s(delim string) []int64 {
+ vals, _ := k.parseInt64s(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer,
+// then it will not be included to result list.
+func (k *Key) ValidUints(delim string) []uint {
+ vals, _ := k.parseUints(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned
+// integer, then it will not be included to result list.
+func (k *Key) ValidUint64s(delim string) []uint64 {
+ vals, _ := k.parseUint64s(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidBools returns list of bool divided by given delimiter. If some value is not a valid bool,
+// then it will not be included to result list.
+func (k *Key) ValidBools(delim string) []bool {
+ vals, _ := k.parseBools(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
+ vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimes(delim string) []time.Time {
+ return k.ValidTimesFormat(time.RFC3339, delim)
+}
+
+// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
+ return k.parseFloat64s(k.Strings(delim), false, true)
+}
+
+// StrictInts returns list of int divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInts(delim string) ([]int, error) {
+ return k.parseInts(k.Strings(delim), false, true)
+}
+
+// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInt64s(delim string) ([]int64, error) {
+ return k.parseInt64s(k.Strings(delim), false, true)
+}
+
+// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
+func (k *Key) StrictUints(delim string) ([]uint, error) {
+ return k.parseUints(k.Strings(delim), false, true)
+}
+
+// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
+ return k.parseUint64s(k.Strings(delim), false, true)
+}
+
+// StrictBools returns list of bool divided by given delimiter or error on first invalid input.
+func (k *Key) StrictBools(delim string) ([]bool, error) {
+ return k.parseBools(k.Strings(delim), false, true)
+}
+
+// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
+// or error on first invalid input.
+func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
+ return k.parseTimesFormat(format, k.Strings(delim), false, true)
+}
+
+// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
+// or error on first invalid input.
+func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
+ return k.StrictTimesFormat(time.RFC3339, delim)
+}
+
+// parseBools transforms strings to bools.
+func (k *Key) parseBools(strs []string, addInvalid, returnOnInvalid bool) ([]bool, error) {
+ vals := make([]bool, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := parseBool(str)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(bool))
+ }
+ }
+ return vals, err
+}
+
+// parseFloat64s transforms strings to float64s.
+func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) {
+ vals := make([]float64, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := strconv.ParseFloat(str, 64)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(float64))
+ }
+ }
+ return vals, err
+}
+
+// parseInts transforms strings to ints.
+func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) {
+ vals := make([]int, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := strconv.ParseInt(str, 0, 64)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, int(val.(int64)))
+ }
+ }
+ return vals, err
+}
+
+// parseInt64s transforms strings to int64s.
+func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) {
+ vals := make([]int64, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := strconv.ParseInt(str, 0, 64)
+ return val, err
+ }
+
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(int64))
+ }
+ }
+ return vals, err
+}
+
+// parseUints transforms strings to uints.
+func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) {
+ vals := make([]uint, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := strconv.ParseUint(str, 0, 64)
+ return val, err
+ }
+
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, uint(val.(uint64)))
+ }
+ }
+ return vals, err
+}
+
+// parseUint64s transforms strings to uint64s.
+func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
+ vals := make([]uint64, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := strconv.ParseUint(str, 0, 64)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(uint64))
+ }
+ }
+ return vals, err
+}
+
+type Parser func(str string) (interface{}, error)
+
+// parseTimesFormat transforms strings to times in given format.
+func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
+ vals := make([]time.Time, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := time.Parse(format, str)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(time.Time))
+ }
+ }
+ return vals, err
+}
+
+// doParse transforms strings to different types
+func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) {
+ vals := make([]interface{}, 0, len(strs))
+ for _, str := range strs {
+ val, err := parser(str)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// SetValue changes key value.
+func (k *Key) SetValue(v string) {
+ if k.s.f.BlockMode {
+ k.s.f.lock.Lock()
+ defer k.s.f.lock.Unlock()
+ }
+
+ k.value = v
+ k.s.keysHash[k.name] = v
+}
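
A minimal sketch of the typed accessors on Key defined above (Must*, In, Strings), using inline data so it is self-contained; the key names and values are assumptions for illustration:

	package main

	import (
		"fmt"
		"time"

		"gopkg.in/ini.v1"
	)

	func main() {
		// Load accepts raw []byte data as well as file names.
		cfg, err := ini.Load([]byte("port = 9000\ntimeout = 5s\nhosts = a, b, c"))
		if err != nil {
			panic(err)
		}
		sec := cfg.Section("")

		fmt.Println(sec.Key("port").MustInt(8080))                     // 9000
		fmt.Println(sec.Key("timeout").MustDuration(30 * time.Second)) // 5s
		fmt.Println(sec.Key("hosts").Strings(","))                     // [a b c]
		fmt.Println(sec.Key("mode").In("debug", []string{"debug", "release"}))
	}
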
diff --git a/vendor/gopkg.in/ini.v1/parser.go b/vendor/gopkg.in/ini.v1/parser.go
new file mode 100644
index 000000000..b8b5aa86a
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/parser.go
@@ -0,0 +1,531 @@
+// Copyright 2015 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "regexp"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+const minReaderBufferSize = 4096
+
+var pythonMultiline = regexp.MustCompile(`^([\t\f ]+)(.*)`)
+
+type parserOptions struct {
+ IgnoreContinuation bool
+ IgnoreInlineComment bool
+ AllowPythonMultilineValues bool
+ SpaceBeforeInlineComment bool
+ UnescapeValueDoubleQuotes bool
+ UnescapeValueCommentSymbols bool
+ PreserveSurroundedQuote bool
+ DebugFunc DebugFunc
+ ReaderBufferSize int
+}
+
+type parser struct {
+ buf *bufio.Reader
+ options parserOptions
+
+ isEOF bool
+ count int
+ comment *bytes.Buffer
+}
+
+func (p *parser) debug(format string, args ...interface{}) {
+ if p.options.DebugFunc != nil {
+ p.options.DebugFunc(fmt.Sprintf(format, args...))
+ }
+}
+
+func newParser(r io.Reader, opts parserOptions) *parser {
+ size := opts.ReaderBufferSize
+ if size < minReaderBufferSize {
+ size = minReaderBufferSize
+ }
+
+ return &parser{
+ buf: bufio.NewReaderSize(r, size),
+ options: opts,
+ count: 1,
+ comment: &bytes.Buffer{},
+ }
+}
+
+// BOM handles the Byte Order Mark header of the UTF-8, UTF-16 LE and UTF-16 BE encodings.
+// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
+func (p *parser) BOM() error {
+ mask, err := p.buf.Peek(2)
+ if err != nil && err != io.EOF {
+ return err
+ } else if len(mask) < 2 {
+ return nil
+ }
+
+ switch {
+ case mask[0] == 254 && mask[1] == 255:
+ fallthrough
+ case mask[0] == 255 && mask[1] == 254:
+ _, err = p.buf.Read(mask)
+ if err != nil {
+ return err
+ }
+ case mask[0] == 239 && mask[1] == 187:
+ mask, err := p.buf.Peek(3)
+ if err != nil && err != io.EOF {
+ return err
+ } else if len(mask) < 3 {
+ return nil
+ }
+ if mask[2] == 191 {
+ _, err = p.buf.Read(mask)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (p *parser) readUntil(delim byte) ([]byte, error) {
+ data, err := p.buf.ReadBytes(delim)
+ if err != nil {
+ if err == io.EOF {
+ p.isEOF = true
+ } else {
+ return nil, err
+ }
+ }
+ return data, nil
+}
+
+func cleanComment(in []byte) ([]byte, bool) {
+ i := bytes.IndexAny(in, "#;")
+ if i == -1 {
+ return nil, false
+ }
+ return in[i:], true
+}
+
+func readKeyName(delimiters string, in []byte) (string, int, error) {
+ line := string(in)
+
+ // Check if key name surrounded by quotes.
+ var keyQuote string
+ if line[0] == '"' {
+ if len(line) > 6 && line[0:3] == `"""` {
+ keyQuote = `"""`
+ } else {
+ keyQuote = `"`
+ }
+ } else if line[0] == '`' {
+ keyQuote = "`"
+ }
+
+ // Get out key name
+ var endIdx int
+ if len(keyQuote) > 0 {
+ startIdx := len(keyQuote)
+ // FIXME: fail case -> """"""name"""=value
+ pos := strings.Index(line[startIdx:], keyQuote)
+ if pos == -1 {
+ return "", -1, fmt.Errorf("missing closing key quote: %s", line)
+ }
+ pos += startIdx
+
+ // Find key-value delimiter
+ i := strings.IndexAny(line[pos+startIdx:], delimiters)
+ if i < 0 {
+ return "", -1, ErrDelimiterNotFound{line}
+ }
+ endIdx = pos + i
+ return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
+ }
+
+ endIdx = strings.IndexAny(line, delimiters)
+ if endIdx < 0 {
+ return "", -1, ErrDelimiterNotFound{line}
+ }
+ return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
+}
+
+func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
+ for {
+ data, err := p.readUntil('\n')
+ if err != nil {
+ return "", err
+ }
+ next := string(data)
+
+ pos := strings.LastIndex(next, valQuote)
+ if pos > -1 {
+ val += next[:pos]
+
+ comment, has := cleanComment([]byte(next[pos:]))
+ if has {
+ p.comment.Write(bytes.TrimSpace(comment))
+ }
+ break
+ }
+ val += next
+ if p.isEOF {
+ return "", fmt.Errorf("missing closing key quote from %q to %q", line, next)
+ }
+ }
+ return val, nil
+}
+
+func (p *parser) readContinuationLines(val string) (string, error) {
+ for {
+ data, err := p.readUntil('\n')
+ if err != nil {
+ return "", err
+ }
+ next := strings.TrimSpace(string(data))
+
+ if len(next) == 0 {
+ break
+ }
+ val += next
+ if val[len(val)-1] != '\\' {
+ break
+ }
+ val = val[:len(val)-1]
+ }
+ return val, nil
+}
+
+// hasSurroundedQuote checks whether the first and last characters
+// are quotes \" or \'.
+// It returns false if any other part also contains the same kind of quote.
+func hasSurroundedQuote(in string, quote byte) bool {
+ return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote &&
+ strings.IndexByte(in[1:], quote) == len(in)-2
+}
+
+func (p *parser) readValue(in []byte, bufferSize int) (string, error) {
+
+ line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
+ if len(line) == 0 {
+ if p.options.AllowPythonMultilineValues && len(in) > 0 && in[len(in)-1] == '\n' {
+ return p.readPythonMultilines(line, bufferSize)
+ }
+ return "", nil
+ }
+
+ var valQuote string
+ if len(line) > 3 && line[0:3] == `"""` {
+ valQuote = `"""`
+ } else if line[0] == '`' {
+ valQuote = "`"
+ } else if p.options.UnescapeValueDoubleQuotes && line[0] == '"' {
+ valQuote = `"`
+ }
+
+ if len(valQuote) > 0 {
+ startIdx := len(valQuote)
+ pos := strings.LastIndex(line[startIdx:], valQuote)
+ // Check for multi-line value
+ if pos == -1 {
+ return p.readMultilines(line, line[startIdx:], valQuote)
+ }
+
+ if p.options.UnescapeValueDoubleQuotes && valQuote == `"` {
+ return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil
+ }
+ return line[startIdx : pos+startIdx], nil
+ }
+
+ lastChar := line[len(line)-1]
+ // Won't be able to reach here if value only contains whitespace
+ line = strings.TrimSpace(line)
+ trimmedLastChar := line[len(line)-1]
+
+ // Check continuation lines when desired
+ if !p.options.IgnoreContinuation && trimmedLastChar == '\\' {
+ return p.readContinuationLines(line[:len(line)-1])
+ }
+
+ // Check if ignore inline comment
+ if !p.options.IgnoreInlineComment {
+ var i int
+ if p.options.SpaceBeforeInlineComment {
+ i = strings.Index(line, " #")
+ if i == -1 {
+ i = strings.Index(line, " ;")
+ }
+
+ } else {
+ i = strings.IndexAny(line, "#;")
+ }
+
+ if i > -1 {
+ p.comment.WriteString(line[i:])
+ line = strings.TrimSpace(line[:i])
+ }
+
+ }
+
+ // Trim single and double quotes
+ if (hasSurroundedQuote(line, '\'') ||
+ hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote {
+ line = line[1 : len(line)-1]
+ } else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols {
+ line = strings.ReplaceAll(line, `\;`, ";")
+ line = strings.ReplaceAll(line, `\#`, "#")
+ } else if p.options.AllowPythonMultilineValues && lastChar == '\n' {
+ return p.readPythonMultilines(line, bufferSize)
+ }
+
+ return line, nil
+}
+
+func (p *parser) readPythonMultilines(line string, bufferSize int) (string, error) {
+ parserBufferPeekResult, _ := p.buf.Peek(bufferSize)
+ peekBuffer := bytes.NewBuffer(parserBufferPeekResult)
+
+ indentSize := 0
+ for {
+ peekData, peekErr := peekBuffer.ReadBytes('\n')
+ if peekErr != nil {
+ if peekErr == io.EOF {
+ p.debug("readPythonMultilines: io.EOF, peekData: %q, line: %q", string(peekData), line)
+ return line, nil
+ }
+
+ p.debug("readPythonMultilines: failed to peek with error: %v", peekErr)
+ return "", peekErr
+ }
+
+ p.debug("readPythonMultilines: parsing %q", string(peekData))
+
+ peekMatches := pythonMultiline.FindStringSubmatch(string(peekData))
+ p.debug("readPythonMultilines: matched %d parts", len(peekMatches))
+ for n, v := range peekMatches {
+ p.debug(" %d: %q", n, v)
+ }
+
+ // Return if not a Python multiline value.
+ if len(peekMatches) != 3 {
+ p.debug("readPythonMultilines: end of value, got: %q", line)
+ return line, nil
+ }
+
+ // Determine indent size and line prefix.
+ currentIndentSize := len(peekMatches[1])
+ if indentSize < 1 {
+ indentSize = currentIndentSize
+ p.debug("readPythonMultilines: indent size is %d", indentSize)
+ }
+
+ // Make sure each line is indented at least as far as first line.
+ if currentIndentSize < indentSize {
+ p.debug("readPythonMultilines: end of value, current indent: %d, expected indent: %d, line: %q", currentIndentSize, indentSize, line)
+ return line, nil
+ }
+
+ // Advance the parser reader (buffer) in-sync with the peek buffer.
+ _, err := p.buf.Discard(len(peekData))
+ if err != nil {
+ p.debug("readPythonMultilines: failed to skip to the end, returning error")
+ return "", err
+ }
+
+ // Handle indented empty line.
+ line += "\n" + peekMatches[1][indentSize:] + peekMatches[2]
+ }
+}
+
+// parse parses data through an io.Reader.
+func (f *File) parse(reader io.Reader) (err error) {
+ p := newParser(reader, parserOptions{
+ IgnoreContinuation: f.options.IgnoreContinuation,
+ IgnoreInlineComment: f.options.IgnoreInlineComment,
+ AllowPythonMultilineValues: f.options.AllowPythonMultilineValues,
+ SpaceBeforeInlineComment: f.options.SpaceBeforeInlineComment,
+ UnescapeValueDoubleQuotes: f.options.UnescapeValueDoubleQuotes,
+ UnescapeValueCommentSymbols: f.options.UnescapeValueCommentSymbols,
+ PreserveSurroundedQuote: f.options.PreserveSurroundedQuote,
+ DebugFunc: f.options.DebugFunc,
+ ReaderBufferSize: f.options.ReaderBufferSize,
+ })
+ if err = p.BOM(); err != nil {
+ return fmt.Errorf("BOM: %v", err)
+ }
+
+ // Ignore error because default section name is never empty string.
+ name := DefaultSection
+ if f.options.Insensitive || f.options.InsensitiveSections {
+ name = strings.ToLower(DefaultSection)
+ }
+ section, _ := f.NewSection(name)
+
+ // This "last" is not strictly equivalent to "previous one" if current key is not the first nested key
+ var isLastValueEmpty bool
+ var lastRegularKey *Key
+
+ var line []byte
+ var inUnparseableSection bool
+
+ // NOTE: Iterate and increase `currentPeekSize` until
+ // the size of the parser buffer is found.
+ // TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`.
+ parserBufferSize := 0
+ // NOTE: Peek 4kb at a time.
+ currentPeekSize := minReaderBufferSize
+
+ if f.options.AllowPythonMultilineValues {
+ for {
+ peekBytes, _ := p.buf.Peek(currentPeekSize)
+ peekBytesLength := len(peekBytes)
+
+ if parserBufferSize >= peekBytesLength {
+ break
+ }
+
+ currentPeekSize *= 2
+ parserBufferSize = peekBytesLength
+ }
+ }
+
+ for !p.isEOF {
+ line, err = p.readUntil('\n')
+ if err != nil {
+ return err
+ }
+
+ if f.options.AllowNestedValues &&
+ isLastValueEmpty && len(line) > 0 {
+ if line[0] == ' ' || line[0] == '\t' {
+ err = lastRegularKey.addNestedValue(string(bytes.TrimSpace(line)))
+ if err != nil {
+ return err
+ }
+ continue
+ }
+ }
+
+ line = bytes.TrimLeftFunc(line, unicode.IsSpace)
+ if len(line) == 0 {
+ continue
+ }
+
+ // Comments
+ if line[0] == '#' || line[0] == ';' {
+ // Note: we keep the trailing line break here,
+ // because it is needed when adding a second comment line,
+ // so it is cleaned up once at the end when the comment is set as a value.
+ p.comment.Write(line)
+ continue
+ }
+
+ // Section
+ if line[0] == '[' {
+ // Read to the next ']' (TODO: support quoted strings)
+ closeIdx := bytes.LastIndexByte(line, ']')
+ if closeIdx == -1 {
+ return fmt.Errorf("unclosed section: %s", line)
+ }
+
+ name := string(line[1:closeIdx])
+ section, err = f.NewSection(name)
+ if err != nil {
+ return err
+ }
+
+ comment, has := cleanComment(line[closeIdx+1:])
+ if has {
+ p.comment.Write(comment)
+ }
+
+ section.Comment = strings.TrimSpace(p.comment.String())
+
+ // Reset auto-counter and comments
+ p.comment.Reset()
+ p.count = 1
+
+ inUnparseableSection = false
+ for i := range f.options.UnparseableSections {
+ if f.options.UnparseableSections[i] == name ||
+ ((f.options.Insensitive || f.options.InsensitiveSections) && strings.EqualFold(f.options.UnparseableSections[i], name)) {
+ inUnparseableSection = true
+ continue
+ }
+ }
+ continue
+ }
+
+ if inUnparseableSection {
+ section.isRawSection = true
+ section.rawBody += string(line)
+ continue
+ }
+
+ kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line)
+ if err != nil {
+ // Treat as boolean key when desired, and whole line is key name.
+ if IsErrDelimiterNotFound(err) {
+ switch {
+ case f.options.AllowBooleanKeys:
+ kname, err := p.readValue(line, parserBufferSize)
+ if err != nil {
+ return err
+ }
+ key, err := section.NewBooleanKey(kname)
+ if err != nil {
+ return err
+ }
+ key.Comment = strings.TrimSpace(p.comment.String())
+ p.comment.Reset()
+ continue
+
+ case f.options.SkipUnrecognizableLines:
+ continue
+ }
+ }
+ return err
+ }
+
+ // Auto increment.
+ isAutoIncr := false
+ if kname == "-" {
+ isAutoIncr = true
+ kname = "#" + strconv.Itoa(p.count)
+ p.count++
+ }
+
+ value, err := p.readValue(line[offset:], parserBufferSize)
+ if err != nil {
+ return err
+ }
+ isLastValueEmpty = len(value) == 0
+
+ key, err := section.NewKey(kname, value)
+ if err != nil {
+ return err
+ }
+ key.isAutoIncrement = isAutoIncr
+ key.Comment = strings.TrimSpace(p.comment.String())
+ p.comment.Reset()
+ lastRegularKey = key
+ }
+ return nil
+}
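
A hedged sketch of two parser behaviours implemented above: boolean-style keys (enabled via AllowBooleanKeys) and backslash continuation lines. The INI text is an assumption made up for illustration:

	package main

	import (
		"fmt"

		"gopkg.in/ini.v1"
	)

	func main() {
		data := []byte("[features]\nskip-verify\npaths = /a,\\\n    /b\n")
		cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, data)
		if err != nil {
			panic(err)
		}
		sec := cfg.Section("features")

		// "skip-verify" has no delimiter, so it is parsed as a boolean key.
		fmt.Println(sec.Key("skip-verify").MustBool(false)) // true

		// The trailing backslash folds the next line into the same value.
		fmt.Println(sec.Key("paths").String()) // /a,/b
	}
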
diff --git a/vendor/gopkg.in/ini.v1/section.go b/vendor/gopkg.in/ini.v1/section.go
new file mode 100644
index 000000000..a3615d820
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/section.go
@@ -0,0 +1,256 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// Section represents a config section.
+type Section struct {
+ f *File
+ Comment string
+ name string
+ keys map[string]*Key
+ keyList []string
+ keysHash map[string]string
+
+ isRawSection bool
+ rawBody string
+}
+
+func newSection(f *File, name string) *Section {
+ return &Section{
+ f: f,
+ name: name,
+ keys: make(map[string]*Key),
+ keyList: make([]string, 0, 10),
+ keysHash: make(map[string]string),
+ }
+}
+
+// Name returns name of Section.
+func (s *Section) Name() string {
+ return s.name
+}
+
+// Body returns rawBody of Section if the section was marked as unparseable.
+// It still follows the other rules of the INI format surrounding leading/trailing whitespace.
+func (s *Section) Body() string {
+ return strings.TrimSpace(s.rawBody)
+}
+
+// SetBody updates body content only if section is raw.
+func (s *Section) SetBody(body string) {
+ if !s.isRawSection {
+ return
+ }
+ s.rawBody = body
+}
+
+// NewKey creates a new key to given section.
+func (s *Section) NewKey(name, val string) (*Key, error) {
+ if len(name) == 0 {
+ return nil, errors.New("error creating new key: empty key name")
+ } else if s.f.options.Insensitive || s.f.options.InsensitiveKeys {
+ name = strings.ToLower(name)
+ }
+
+ if s.f.BlockMode {
+ s.f.lock.Lock()
+ defer s.f.lock.Unlock()
+ }
+
+ if inSlice(name, s.keyList) {
+ if s.f.options.AllowShadows {
+ if err := s.keys[name].addShadow(val); err != nil {
+ return nil, err
+ }
+ } else {
+ s.keys[name].value = val
+ s.keysHash[name] = val
+ }
+ return s.keys[name], nil
+ }
+
+ s.keyList = append(s.keyList, name)
+ s.keys[name] = newKey(s, name, val)
+ s.keysHash[name] = val
+ return s.keys[name], nil
+}
+
+// NewBooleanKey creates a new boolean type key to given section.
+func (s *Section) NewBooleanKey(name string) (*Key, error) {
+ key, err := s.NewKey(name, "true")
+ if err != nil {
+ return nil, err
+ }
+
+ key.isBooleanType = true
+ return key, nil
+}
+
+// GetKey returns key in section by given name.
+func (s *Section) GetKey(name string) (*Key, error) {
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ }
+ if s.f.options.Insensitive || s.f.options.InsensitiveKeys {
+ name = strings.ToLower(name)
+ }
+ key := s.keys[name]
+ if s.f.BlockMode {
+ s.f.lock.RUnlock()
+ }
+
+ if key == nil {
+ // Check if it is a child-section.
+ sname := s.name
+ for {
+ if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 {
+ sname = sname[:i]
+ sec, err := s.f.GetSection(sname)
+ if err != nil {
+ continue
+ }
+ return sec.GetKey(name)
+ }
+ break
+ }
+ return nil, fmt.Errorf("error when getting key of section %q: key %q not exists", s.name, name)
+ }
+ return key, nil
+}
+
+// HasKey returns true if section contains a key with given name.
+func (s *Section) HasKey(name string) bool {
+ key, _ := s.GetKey(name)
+ return key != nil
+}
+
+// Deprecated: Use "HasKey" instead.
+func (s *Section) Haskey(name string) bool {
+ return s.HasKey(name)
+}
+
+// HasValue returns true if section contains given raw value.
+func (s *Section) HasValue(value string) bool {
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ defer s.f.lock.RUnlock()
+ }
+
+ for _, k := range s.keys {
+ if value == k.value {
+ return true
+ }
+ }
+ return false
+}
+
+// Key assumes named Key exists in section and returns a zero-value when not.
+func (s *Section) Key(name string) *Key {
+ key, err := s.GetKey(name)
+ if err != nil {
+ // It's OK here because the only possible error is empty key name,
+ // but if it's empty, this piece of code won't be executed.
+ key, _ = s.NewKey(name, "")
+ return key
+ }
+ return key
+}
+
+// Keys returns list of keys of section.
+func (s *Section) Keys() []*Key {
+ keys := make([]*Key, len(s.keyList))
+ for i := range s.keyList {
+ keys[i] = s.Key(s.keyList[i])
+ }
+ return keys
+}
+
+// ParentKeys returns list of keys of parent section.
+func (s *Section) ParentKeys() []*Key {
+ var parentKeys []*Key
+ sname := s.name
+ for {
+ if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 {
+ sname = sname[:i]
+ sec, err := s.f.GetSection(sname)
+ if err != nil {
+ continue
+ }
+ parentKeys = append(parentKeys, sec.Keys()...)
+ } else {
+ break
+ }
+
+ }
+ return parentKeys
+}
+
+// KeyStrings returns list of key names of section.
+func (s *Section) KeyStrings() []string {
+ list := make([]string, len(s.keyList))
+ copy(list, s.keyList)
+ return list
+}
+
+// KeysHash returns keys hash consisting of names and values.
+func (s *Section) KeysHash() map[string]string {
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ defer s.f.lock.RUnlock()
+ }
+
+ hash := make(map[string]string, len(s.keysHash))
+ for key, value := range s.keysHash {
+ hash[key] = value
+ }
+ return hash
+}
+
+// DeleteKey deletes a key from section.
+func (s *Section) DeleteKey(name string) {
+ if s.f.BlockMode {
+ s.f.lock.Lock()
+ defer s.f.lock.Unlock()
+ }
+
+ for i, k := range s.keyList {
+ if k == name {
+ s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
+ delete(s.keys, name)
+ delete(s.keysHash, name)
+ return
+ }
+ }
+}
+
+// ChildSections returns a list of child sections of current section.
+// For example, "[parent.child1]" and "[parent.child12]" are child sections
+// of section "[parent]".
+func (s *Section) ChildSections() []*Section {
+ prefix := s.name + s.f.options.ChildSectionDelimiter
+ children := make([]*Section, 0, 3)
+ for _, name := range s.f.sectionList {
+ if strings.HasPrefix(name, prefix) {
+ children = append(children, s.f.sections[name]...)
+ }
+ }
+ return children
+}
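
A small sketch of the Section helpers defined above (HasKey, KeysHash, ChildSections); the section layout and key names are assumptions:

	package main

	import (
		"fmt"

		"gopkg.in/ini.v1"
	)

	func main() {
		data := []byte("[parent]\nname = root\n[parent.child1]\nname = one\n[parent.child2]\nname = two\n")
		cfg, err := ini.Load(data)
		if err != nil {
			panic(err)
		}

		parent := cfg.Section("parent")
		fmt.Println(parent.HasKey("name")) // true
		fmt.Println(parent.KeysHash())     // map[name:root]

		// Child sections are matched by the "parent." prefix.
		for _, child := range parent.ChildSections() {
			fmt.Println(child.Name(), child.Key("name").String())
		}
	}
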
diff --git a/vendor/gopkg.in/ini.v1/struct.go b/vendor/gopkg.in/ini.v1/struct.go
new file mode 100644
index 000000000..a486b2fe0
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/struct.go
@@ -0,0 +1,747 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+ "unicode"
+)
+
+// NameMapper represents an ini tag name mapper.
+type NameMapper func(string) string
+
+// Built-in name getters.
+var (
+ // SnackCase converts to format SNACK_CASE.
+ SnackCase NameMapper = func(raw string) string {
+ newstr := make([]rune, 0, len(raw))
+ for i, chr := range raw {
+ if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+ if i > 0 {
+ newstr = append(newstr, '_')
+ }
+ }
+ newstr = append(newstr, unicode.ToUpper(chr))
+ }
+ return string(newstr)
+ }
+ // TitleUnderscore converts to format title_underscore.
+ TitleUnderscore NameMapper = func(raw string) string {
+ newstr := make([]rune, 0, len(raw))
+ for i, chr := range raw {
+ if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+ if i > 0 {
+ newstr = append(newstr, '_')
+ }
+ chr -= 'A' - 'a'
+ }
+ newstr = append(newstr, chr)
+ }
+ return string(newstr)
+ }
+)
+
+func (s *Section) parseFieldName(raw, actual string) string {
+ if len(actual) > 0 {
+ return actual
+ }
+ if s.f.NameMapper != nil {
+ return s.f.NameMapper(raw)
+ }
+ return raw
+}
+
+func parseDelim(actual string) string {
+ if len(actual) > 0 {
+ return actual
+ }
+ return ","
+}
+
+var reflectTime = reflect.TypeOf(time.Now()).Kind()
+
+// setSliceWithProperType sets proper values to slice based on its type.
+func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
+ var strs []string
+ if allowShadow {
+ strs = key.StringsWithShadows(delim)
+ } else {
+ strs = key.Strings(delim)
+ }
+
+ numVals := len(strs)
+ if numVals == 0 {
+ return nil
+ }
+
+ var vals interface{}
+ var err error
+
+ sliceOf := field.Type().Elem().Kind()
+ switch sliceOf {
+ case reflect.String:
+ vals = strs
+ case reflect.Int:
+ vals, err = key.parseInts(strs, true, false)
+ case reflect.Int64:
+ vals, err = key.parseInt64s(strs, true, false)
+ case reflect.Uint:
+ vals, err = key.parseUints(strs, true, false)
+ case reflect.Uint64:
+ vals, err = key.parseUint64s(strs, true, false)
+ case reflect.Float64:
+ vals, err = key.parseFloat64s(strs, true, false)
+ case reflect.Bool:
+ vals, err = key.parseBools(strs, true, false)
+ case reflectTime:
+ vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false)
+ default:
+ return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+ }
+ if err != nil && isStrict {
+ return err
+ }
+
+ slice := reflect.MakeSlice(field.Type(), numVals, numVals)
+ for i := 0; i < numVals; i++ {
+ switch sliceOf {
+ case reflect.String:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
+ case reflect.Int:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
+ case reflect.Int64:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
+ case reflect.Uint:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
+ case reflect.Uint64:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
+ case reflect.Float64:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
+ case reflect.Bool:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]bool)[i]))
+ case reflectTime:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
+ }
+ }
+ field.Set(slice)
+ return nil
+}
+
+func wrapStrictError(err error, isStrict bool) error {
+ if isStrict {
+ return err
+ }
+ return nil
+}
+
+// setWithProperType sets the proper value on the field based on its type,
+// but it does not return an error for failed parsing,
+// because we want to use the default value that is already assigned to the struct.
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
+ vt := t
+ isPtr := t.Kind() == reflect.Ptr
+ if isPtr {
+ vt = t.Elem()
+ }
+ switch vt.Kind() {
+ case reflect.String:
+ stringVal := key.String()
+ if isPtr {
+ field.Set(reflect.ValueOf(&stringVal))
+ } else if len(stringVal) > 0 {
+ field.SetString(key.String())
+ }
+ case reflect.Bool:
+ boolVal, err := key.Bool()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ if isPtr {
+ field.Set(reflect.ValueOf(&boolVal))
+ } else {
+ field.SetBool(boolVal)
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ // ParseDuration will not return err for `0`, so check the type name
+ if vt.Name() == "Duration" {
+ durationVal, err := key.Duration()
+ if err != nil {
+ if intVal, err := key.Int64(); err == nil {
+ field.SetInt(intVal)
+ return nil
+ }
+ return wrapStrictError(err, isStrict)
+ }
+ if isPtr {
+ field.Set(reflect.ValueOf(&durationVal))
+ } else if int64(durationVal) > 0 {
+ field.Set(reflect.ValueOf(durationVal))
+ }
+ return nil
+ }
+
+ intVal, err := key.Int64()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ if isPtr {
+ pv := reflect.New(t.Elem())
+ pv.Elem().SetInt(intVal)
+ field.Set(pv)
+ } else {
+ field.SetInt(intVal)
+ }
+ // byte is an alias for uint8, so supporting uint8 breaks support for byte
+ case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ durationVal, err := key.Duration()
+ // Skip zero value
+ if err == nil && uint64(durationVal) > 0 {
+ if isPtr {
+ field.Set(reflect.ValueOf(&durationVal))
+ } else {
+ field.Set(reflect.ValueOf(durationVal))
+ }
+ return nil
+ }
+
+ uintVal, err := key.Uint64()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ if isPtr {
+ pv := reflect.New(t.Elem())
+ pv.Elem().SetUint(uintVal)
+ field.Set(pv)
+ } else {
+ field.SetUint(uintVal)
+ }
+
+ case reflect.Float32, reflect.Float64:
+ floatVal, err := key.Float64()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ if isPtr {
+ pv := reflect.New(t.Elem())
+ pv.Elem().SetFloat(floatVal)
+ field.Set(pv)
+ } else {
+ field.SetFloat(floatVal)
+ }
+ case reflectTime:
+ timeVal, err := key.Time()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ if isPtr {
+ field.Set(reflect.ValueOf(&timeVal))
+ } else {
+ field.Set(reflect.ValueOf(timeVal))
+ }
+ case reflect.Slice:
+ return setSliceWithProperType(key, field, delim, allowShadow, isStrict)
+ default:
+ return fmt.Errorf("unsupported type %q", t)
+ }
+ return nil
+}
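+
+// Illustrative mapping sketch (editorial example; the Config type and its
+// keys are assumptions, not part of the upstream source). Given
+//
+//	type Config struct {
+//		Name    string        `ini:"name"`    // only overwritten when the key value is non-empty
+//		Retries *int          `ini:"retries"` // pointer fields are allocated and set even for zero values
+//		Timeout time.Duration `ini:"timeout"` // parsed via key.Duration(), e.g. "30s"
+//	}
+//
+// setWithProperType leaves Name untouched for an empty value, sets *Retries
+// through reflect.New, and only stores Timeout when it is greater than zero.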
+
+func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool, extends bool) {
+ opts := strings.SplitN(tag, ",", 5)
+ rawName = opts[0]
+ for _, opt := range opts[1:] {
+ omitEmpty = omitEmpty || (opt == "omitempty")
+ allowShadow = allowShadow || (opt == "allowshadow")
+ allowNonUnique = allowNonUnique || (opt == "nonunique")
+ extends = extends || (opt == "extends")
+ }
+ return rawName, omitEmpty, allowShadow, allowNonUnique, extends
+}
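+
+// Tag-parsing sketch (editorial example; the tag value is an assumption):
+//
+//	name, omitEmpty, allowShadow, nonUnique, extends := parseTagOptions("ports,omitempty,allowshadow")
+//	// name == "ports", omitEmpty == true, allowShadow == true,
+//	// nonUnique == false, extends == false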
+
+// mapToField maps the given value to the matching field of the given section.
+// The sectionIndex is the index (if non-unique sections are enabled) to which the value should be added.
+func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int, sectionName string) error {
+ if val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ typ := val.Type()
+
+ for i := 0; i < typ.NumField(); i++ {
+ field := val.Field(i)
+ tpField := typ.Field(i)
+
+ tag := tpField.Tag.Get("ini")
+ if tag == "-" {
+ continue
+ }
+
+ rawName, _, allowShadow, allowNonUnique, extends := parseTagOptions(tag)
+ fieldName := s.parseFieldName(tpField.Name, rawName)
+ if len(fieldName) == 0 || !field.CanSet() {
+ continue
+ }
+
+ isStruct := tpField.Type.Kind() == reflect.Struct
+ isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct
+ isAnonymousPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
+ if isAnonymousPtr {
+ field.Set(reflect.New(tpField.Type.Elem()))
+ }
+
+ if extends && (isAnonymousPtr || (isStruct && tpField.Anonymous)) {
+ if isStructPtr && field.IsNil() {
+ field.Set(reflect.New(tpField.Type.Elem()))
+ }
+ fieldSection := s
+ if rawName != "" {
+ sectionName = s.name + s.f.options.ChildSectionDelimiter + rawName
+ if secs, err := s.f.SectionsByName(sectionName); err == nil && sectionIndex < len(secs) {
+ fieldSection = secs[sectionIndex]
+ }
+ }
+ if err := fieldSection.mapToField(field, isStrict, sectionIndex, sectionName); err != nil {
+ return fmt.Errorf("map to field %q: %v", fieldName, err)
+ }
+ } else if isAnonymousPtr || isStruct || isStructPtr {
+ if secs, err := s.f.SectionsByName(fieldName); err == nil {
+ if len(secs) <= sectionIndex {
+ return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName)
+ }
+ // Only set the field to non-nil struct value if we have a section for it.
+ // Otherwise, we end up with a non-nil struct ptr even though there is no data.
+ if isStructPtr && field.IsNil() {
+ field.Set(reflect.New(tpField.Type.Elem()))
+ }
+ if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex, fieldName); err != nil {
+ return fmt.Errorf("map to field %q: %v", fieldName, err)
+ }
+ continue
+ }
+ }
+
+ // Map non-unique sections
+ if allowNonUnique && tpField.Type.Kind() == reflect.Slice {
+ newField, err := s.mapToSlice(fieldName, field, isStrict)
+ if err != nil {
+ return fmt.Errorf("map to slice %q: %v", fieldName, err)
+ }
+
+ field.Set(newField)
+ continue
+ }
+
+ if key, err := s.GetKey(fieldName); err == nil {
+ delim := parseDelim(tpField.Tag.Get("delim"))
+ if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil {
+ return fmt.Errorf("set field %q: %v", fieldName, err)
+ }
+ }
+ }
+ return nil
+}
+
+// mapToSlice maps all sections with the same name and returns the new value.
+// The type of the Value must be a slice.
+func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) (reflect.Value, error) {
+ secs, err := s.f.SectionsByName(secName)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+
+ typ := val.Type().Elem()
+ for i, sec := range secs {
+ elem := reflect.New(typ)
+ if err = sec.mapToField(elem, isStrict, i, sec.name); err != nil {
+ return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err)
+ }
+
+ val = reflect.Append(val, elem.Elem())
+ }
+ return val, nil
+}
+
+// mapTo maps a section to object v.
+func (s *Section) mapTo(v interface{}, isStrict bool) error {
+ typ := reflect.TypeOf(v)
+ val := reflect.ValueOf(v)
+ if typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ val = val.Elem()
+ } else {
+ return errors.New("not a pointer to a struct")
+ }
+
+ if typ.Kind() == reflect.Slice {
+ newField, err := s.mapToSlice(s.name, val, isStrict)
+ if err != nil {
+ return err
+ }
+
+ val.Set(newField)
+ return nil
+ }
+
+ return s.mapToField(val, isStrict, 0, s.name)
+}
+
+// MapTo maps the section to the given struct.
+func (s *Section) MapTo(v interface{}) error {
+ return s.mapTo(v, false)
+}
+
+// StrictMapTo maps the section to the given struct in strict mode,
+// which returns all possible errors, including value parsing errors.
+func (s *Section) StrictMapTo(v interface{}) error {
+ return s.mapTo(v, true)
+}
+
+// MapTo maps the file to the given struct.
+func (f *File) MapTo(v interface{}) error {
+ return f.Section("").MapTo(v)
+}
+
+// StrictMapTo maps the file to the given struct in strict mode,
+// which returns all possible errors, including value parsing errors.
+func (f *File) StrictMapTo(v interface{}) error {
+ return f.Section("").StrictMapTo(v)
+}
+
+// MapToWithMapper maps data sources to the given struct with a name mapper.
+func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+ cfg, err := Load(source, others...)
+ if err != nil {
+ return err
+ }
+ cfg.NameMapper = mapper
+ return cfg.MapTo(v)
+}
+
+// StrictMapToWithMapper maps data sources to the given struct with a name mapper in strict mode,
+// which returns all possible errors, including value parsing errors.
+func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+ cfg, err := Load(source, others...)
+ if err != nil {
+ return err
+ }
+ cfg.NameMapper = mapper
+ return cfg.StrictMapTo(v)
+}
+
+// MapTo maps data sources to the given struct.
+func MapTo(v, source interface{}, others ...interface{}) error {
+ return MapToWithMapper(v, nil, source, others...)
+}
+
+// StrictMapTo maps data sources to the given struct in strict mode,
+// which returns all possible errors, including value parsing errors.
+func StrictMapTo(v, source interface{}, others ...interface{}) error {
+ return StrictMapToWithMapper(v, nil, source, others...)
+}
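+
+// Usage sketch for the MapTo entry points above (editorial illustration; the
+// struct, tags, and INI source are assumptions, not upstream code):
+//
+//	type ServerConfig struct {
+//		Host    string        `ini:"host"`
+//		Port    int           `ini:"port"`
+//		Timeout time.Duration `ini:"timeout"`
+//	}
+//
+//	var conf ServerConfig
+//	src := []byte("host = 0.0.0.0\nport = 8080\ntimeout = 30s\n")
+//	if err := MapTo(&conf, src); err != nil { // StrictMapTo would also surface parse errors
+//		// handle error
+//	}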
+
+// reflectSliceWithProperType does the opposite of setSliceWithProperType.
+func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error {
+ slice := field.Slice(0, field.Len())
+ if field.Len() == 0 {
+ return nil
+ }
+ sliceOf := field.Type().Elem().Kind()
+
+ if allowShadow {
+ var keyWithShadows *Key
+ for i := 0; i < field.Len(); i++ {
+ var val string
+ switch sliceOf {
+ case reflect.String:
+ val = slice.Index(i).String()
+ case reflect.Int, reflect.Int64:
+ val = fmt.Sprint(slice.Index(i).Int())
+ case reflect.Uint, reflect.Uint64:
+ val = fmt.Sprint(slice.Index(i).Uint())
+ case reflect.Float64:
+ val = fmt.Sprint(slice.Index(i).Float())
+ case reflect.Bool:
+ val = fmt.Sprint(slice.Index(i).Bool())
+ case reflectTime:
+ val = slice.Index(i).Interface().(time.Time).Format(time.RFC3339)
+ default:
+ return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+ }
+
+ if i == 0 {
+ keyWithShadows = newKey(key.s, key.name, val)
+ } else {
+ _ = keyWithShadows.AddShadow(val)
+ }
+ }
+ *key = *keyWithShadows
+ return nil
+ }
+
+ var buf bytes.Buffer
+ for i := 0; i < field.Len(); i++ {
+ switch sliceOf {
+ case reflect.String:
+ buf.WriteString(slice.Index(i).String())
+ case reflect.Int, reflect.Int64:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
+ case reflect.Uint, reflect.Uint64:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
+ case reflect.Float64:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
+ case reflect.Bool:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Bool()))
+ case reflectTime:
+ buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
+ default:
+ return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+ }
+ buf.WriteString(delim)
+ }
+ key.SetValue(buf.String()[:buf.Len()-len(delim)])
+ return nil
+}
+
+// reflectWithProperType does the opposite of setWithProperType.
+func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error {
+ switch t.Kind() {
+ case reflect.String:
+ key.SetValue(field.String())
+ case reflect.Bool:
+ key.SetValue(fmt.Sprint(field.Bool()))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ key.SetValue(fmt.Sprint(field.Int()))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ key.SetValue(fmt.Sprint(field.Uint()))
+ case reflect.Float32, reflect.Float64:
+ key.SetValue(fmt.Sprint(field.Float()))
+ case reflectTime:
+ key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
+ case reflect.Slice:
+ return reflectSliceWithProperType(key, field, delim, allowShadow)
+ case reflect.Ptr:
+ if !field.IsNil() {
+ return reflectWithProperType(t.Elem(), key, field.Elem(), delim, allowShadow)
+ }
+ default:
+ return fmt.Errorf("unsupported type %q", t)
+ }
+ return nil
+}
+
+// CR: copied from encoding/json/encode.go with modifications for time.Time support.
+// TODO: add more test coverage.
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflectTime:
+ t, ok := v.Interface().(time.Time)
+ return ok && t.IsZero()
+ }
+ return false
+}
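+
+// omitempty sketch (editorial example; the field is an assumption): for
+//
+//	Debug bool `ini:"debug,omitempty"`
+//
+// a false value is treated as empty by isEmptyValue, so the key is skipped
+// when the struct is reflected back into the file.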
+
+// StructReflector is the interface implemented by struct types that can extract themselves into INI objects.
+type StructReflector interface {
+ ReflectINIStruct(*File) error
+}
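+
+// Implementation sketch (editorial example; the Secrets type is an assumption):
+// a field whose type implements StructReflector takes over its own extraction
+// instead of the field-by-field reflection below.
+//
+//	type Secrets struct{ APIKey string }
+//
+//	func (s Secrets) ReflectINIStruct(f *File) error {
+//		// Write a redacted placeholder rather than the real key.
+//		f.Section("secrets").Key("api_key").SetValue("REDACTED")
+//		return nil
+//	}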
+
+func (s *Section) reflectFrom(val reflect.Value) error {
+ if val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ typ := val.Type()
+
+ for i := 0; i < typ.NumField(); i++ {
+ if !val.Field(i).CanInterface() {
+ continue
+ }
+
+ field := val.Field(i)
+ tpField := typ.Field(i)
+
+ tag := tpField.Tag.Get("ini")
+ if tag == "-" {
+ continue
+ }
+
+ rawName, omitEmpty, allowShadow, allowNonUnique, extends := parseTagOptions(tag)
+ if omitEmpty && isEmptyValue(field) {
+ continue
+ }
+
+ if r, ok := field.Interface().(StructReflector); ok {
+ return r.ReflectINIStruct(s.f)
+ }
+
+ fieldName := s.parseFieldName(tpField.Name, rawName)
+ if len(fieldName) == 0 || !field.CanSet() {
+ continue
+ }
+
+ if extends && tpField.Anonymous && (tpField.Type.Kind() == reflect.Ptr || tpField.Type.Kind() == reflect.Struct) {
+ if err := s.reflectFrom(field); err != nil {
+ return fmt.Errorf("reflect from field %q: %v", fieldName, err)
+ }
+ continue
+ }
+
+ if (tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct) ||
+ (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
+ // Note: The only error here is section doesn't exist.
+ sec, err := s.f.GetSection(fieldName)
+ if err != nil {
+ // Note: fieldName can never be empty here, ignore error.
+ sec, _ = s.f.NewSection(fieldName)
+ }
+
+ // Add comment from comment tag
+ if len(sec.Comment) == 0 {
+ sec.Comment = tpField.Tag.Get("comment")
+ }
+
+ if err = sec.reflectFrom(field); err != nil {
+ return fmt.Errorf("reflect from field %q: %v", fieldName, err)
+ }
+ continue
+ }
+
+ if allowNonUnique && tpField.Type.Kind() == reflect.Slice {
+ slice := field.Slice(0, field.Len())
+ if field.Len() == 0 {
+ return nil
+ }
+ sliceOf := field.Type().Elem().Kind()
+
+ for i := 0; i < field.Len(); i++ {
+ if sliceOf != reflect.Struct && sliceOf != reflect.Ptr {
+ return fmt.Errorf("field %q is not a slice of pointer or struct", fieldName)
+ }
+
+ sec, err := s.f.NewSection(fieldName)
+ if err != nil {
+ return err
+ }
+
+ // Add comment from comment tag
+ if len(sec.Comment) == 0 {
+ sec.Comment = tpField.Tag.Get("comment")
+ }
+
+ if err := sec.reflectFrom(slice.Index(i)); err != nil {
+ return fmt.Errorf("reflect from field %q: %v", fieldName, err)
+ }
+ }
+ continue
+ }
+
+ // Note: Same reason as section.
+ key, err := s.GetKey(fieldName)
+ if err != nil {
+ key, _ = s.NewKey(fieldName, "")
+ }
+
+ // Add comment from comment tag
+ if len(key.Comment) == 0 {
+ key.Comment = tpField.Tag.Get("comment")
+ }
+
+ delim := parseDelim(tpField.Tag.Get("delim"))
+ if err = reflectWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil {
+ return fmt.Errorf("reflect field %q: %v", fieldName, err)
+ }
+
+ }
+ return nil
+}
+
+// ReflectFrom reflects section from given struct. It overwrites existing ones.
+func (s *Section) ReflectFrom(v interface{}) error {
+ typ := reflect.TypeOf(v)
+ val := reflect.ValueOf(v)
+
+ if s.name != DefaultSection && s.f.options.AllowNonUniqueSections &&
+ (typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr) {
+ // Clear sections to make sure none exists before adding the new ones
+ s.f.DeleteSection(s.name)
+
+ if typ.Kind() == reflect.Ptr {
+ sec, err := s.f.NewSection(s.name)
+ if err != nil {
+ return err
+ }
+ return sec.reflectFrom(val.Elem())
+ }
+
+ slice := val.Slice(0, val.Len())
+ sliceOf := val.Type().Elem().Kind()
+ if sliceOf != reflect.Ptr {
+ return fmt.Errorf("not a slice of pointers")
+ }
+
+ for i := 0; i < slice.Len(); i++ {
+ sec, err := s.f.NewSection(s.name)
+ if err != nil {
+ return err
+ }
+
+ err = sec.reflectFrom(slice.Index(i))
+ if err != nil {
+ return fmt.Errorf("reflect from %dth field: %v", i, err)
+ }
+ }
+
+ return nil
+ }
+
+ if typ.Kind() == reflect.Ptr {
+ val = val.Elem()
+ } else {
+ return errors.New("not a pointer to a struct")
+ }
+
+ return s.reflectFrom(val)
+}
+
+// ReflectFrom reflects file from given struct.
+func (f *File) ReflectFrom(v interface{}) error {
+ return f.Section("").ReflectFrom(v)
+}
+
+// ReflectFromWithMapper reflects data sources from given struct with name mapper.
+func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
+ cfg.NameMapper = mapper
+ return cfg.ReflectFrom(v)
+}
+
+// ReflectFrom reflects data sources from given struct.
+func ReflectFrom(cfg *File, v interface{}) error {
+ return ReflectFromWithMapper(cfg, v, nil)
+}
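+
+// Usage sketch for the reflection direction (editorial illustration; the
+// struct literal and output destination are assumptions):
+//
+//	cfg := Empty()
+//	if err := ReflectFrom(cfg, &ServerConfig{Host: "0.0.0.0", Port: 8080}); err != nil {
+//		// handle error
+//	}
+//	_, _ = cfg.WriteTo(os.Stdout)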
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 53b0abd61..c086e43f2 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -27,13 +27,9 @@ codeberg.org/gruf/go-pools
codeberg.org/gruf/go-store/kv
codeberg.org/gruf/go-store/storage
codeberg.org/gruf/go-store/util
-# github.com/PuerkitoBio/goquery v1.8.0
-## explicit; go 1.13
# github.com/ReneKroon/ttlcache v1.7.0
## explicit; go 1.14
github.com/ReneKroon/ttlcache
-# github.com/andybalholm/cascadia v1.3.1
-## explicit; go 1.16
# github.com/aymerick/douceur v0.2.0
## explicit
github.com/aymerick/douceur/css
@@ -45,9 +41,6 @@ github.com/buckket/go-blurhash/base83
# github.com/coreos/go-oidc/v3 v3.1.0
## explicit; go 1.14
github.com/coreos/go-oidc/v3/oidc
-# github.com/cpuguy83/go-md2man/v2 v2.0.1
-## explicit; go 1.11
-github.com/cpuguy83/go-md2man/v2/md2man
# github.com/davecgh/go-spew v1.1.1
## explicit
github.com/davecgh/go-spew/spew
@@ -77,6 +70,9 @@ github.com/dsoprea/go-png-image-structure
# github.com/dsoprea/go-utility v0.0.0-20200717064901-2fccff4aa15e
## explicit; go 1.12
github.com/dsoprea/go-utility/image
+# github.com/fsnotify/fsnotify v1.5.1
+## explicit; go 1.13
+github.com/fsnotify/fsnotify
# github.com/gin-contrib/cors v1.3.1
## explicit; go 1.13
github.com/gin-contrib/cors
@@ -113,8 +109,6 @@ github.com/go-playground/validator/v10
# github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b
## explicit
github.com/go-xmlfmt/xmlfmt
-# github.com/goccy/go-json v0.7.10
-## explicit; go 1.12
# github.com/golang-jwt/jwt v3.2.2+incompatible
## explicit
github.com/golang-jwt/jwt
@@ -155,6 +149,21 @@ github.com/h2non/filetype
github.com/h2non/filetype/matchers
github.com/h2non/filetype/matchers/isobmff
github.com/h2non/filetype/types
+# github.com/hashicorp/hcl v1.0.0
+## explicit
+github.com/hashicorp/hcl
+github.com/hashicorp/hcl/hcl/ast
+github.com/hashicorp/hcl/hcl/parser
+github.com/hashicorp/hcl/hcl/printer
+github.com/hashicorp/hcl/hcl/scanner
+github.com/hashicorp/hcl/hcl/strconv
+github.com/hashicorp/hcl/hcl/token
+github.com/hashicorp/hcl/json/parser
+github.com/hashicorp/hcl/json/scanner
+github.com/hashicorp/hcl/json/token
+# github.com/inconshreveable/mousetrap v1.0.0
+## explicit
+github.com/inconshreveable/mousetrap
# github.com/jackc/chunkreader/v2 v2.0.1
## explicit; go 1.12
github.com/jackc/chunkreader/v2
@@ -195,6 +204,9 @@ github.com/kballard/go-shellquote
# github.com/leodido/go-urn v1.2.1
## explicit; go 1.13
github.com/leodido/go-urn
+# github.com/magiconair/properties v1.8.5
+## explicit; go 1.13
+github.com/magiconair/properties
# github.com/mattn/go-isatty v0.0.14
## explicit; go 1.12
github.com/mattn/go-isatty
@@ -217,6 +229,9 @@ github.com/nfnt/resize
# github.com/oklog/ulid v1.3.1
## explicit
github.com/oklog/ulid
+# github.com/pelletier/go-toml v1.9.4
+## explicit; go 1.12
+github.com/pelletier/go-toml
# github.com/pmezard/go-difflib v1.0.0
## explicit
github.com/pmezard/go-difflib/difflib
@@ -232,13 +247,38 @@ github.com/russross/blackfriday/v2
# github.com/sirupsen/logrus v1.8.1
## explicit; go 1.13
github.com/sirupsen/logrus
-# github.com/stretchr/objx v0.2.0
+# github.com/spf13/afero v1.6.0
+## explicit; go 1.13
+github.com/spf13/afero
+github.com/spf13/afero/mem
+# github.com/spf13/cast v1.4.1
+## explicit
+github.com/spf13/cast
+# github.com/spf13/cobra v1.2.1
+## explicit; go 1.14
+github.com/spf13/cobra
+# github.com/spf13/jwalterweatherman v1.1.0
+## explicit
+github.com/spf13/jwalterweatherman
+# github.com/spf13/pflag v1.0.5
## explicit; go 1.12
+github.com/spf13/pflag
+# github.com/spf13/viper v1.9.0
+## explicit; go 1.12
+github.com/spf13/viper
+github.com/spf13/viper/internal/encoding
+github.com/spf13/viper/internal/encoding/hcl
+github.com/spf13/viper/internal/encoding/json
+github.com/spf13/viper/internal/encoding/toml
+github.com/spf13/viper/internal/encoding/yaml
# github.com/stretchr/testify v1.7.0
## explicit; go 1.13
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
github.com/stretchr/testify/suite
+# github.com/subosito/gotenv v1.2.0
+## explicit
+github.com/subosito/gotenv
# github.com/superseriousbusiness/activity v1.0.1-0.20211113133524-56560b73ace8
## explicit; go 1.12
github.com/superseriousbusiness/activity/pub
@@ -466,9 +506,6 @@ github.com/uptrace/bun/dialect/pgdialect
# github.com/uptrace/bun/dialect/sqlitedialect v1.0.18
## explicit; go 1.16
github.com/uptrace/bun/dialect/sqlitedialect
-# github.com/urfave/cli/v2 v2.3.0
-## explicit; go 1.11
-github.com/urfave/cli/v2
# github.com/vmihailenco/msgpack/v5 v5.3.5
## explicit; go 1.11
github.com/vmihailenco/msgpack/v5
@@ -593,6 +630,9 @@ google.golang.org/protobuf/reflect/protoregistry
google.golang.org/protobuf/runtime/protoiface
google.golang.org/protobuf/runtime/protoimpl
google.golang.org/protobuf/types/descriptorpb
+# gopkg.in/ini.v1 v1.63.2
+## explicit
+gopkg.in/ini.v1
# gopkg.in/square/go-jose.v2 v2.6.0
## explicit
gopkg.in/square/go-jose.v2