
Implement git refs API for listing references (branches, tags and others) (#5354)

* Initial routes to git refs API

* Git refs API implementation

* Update swagger

* Fix copyright

* Make swagger happy add basic test

* Fix test

* Fix test again :)
Authored by Lauris BH on 2018-11-27 23:52:20 +02:00, committed by techknowlogick
parent 294904321c
commit 08bf443016
268 changed files with 48603 additions and 10 deletions

Gopkg.lock (generated, 158 lines changed)

@ -3,19 +3,19 @@
[[projects]]
branch = "master"
digest = "1:296fd9dfbae66f6feeb09c7163ec39c262de425289154430a55d0a248c520486"
digest = "1:ebd587087cf937b6d3db7dde843a557d157fd68820a9d3d0157a8d8f4011ad29"
name = "code.gitea.io/git"
packages = ["."]
pruneopts = "NUT"
revision = "d945eda535aa7d6b3c1f486279df2a3f7d05f78b"
revision = "578ad8f1259b0d660d19b05a011596f8fd3fea37"
[[projects]]
branch = "master"
digest = "1:b194da40b41ae99546dfeec5a85f1fec2a6c51350d438e511ef90f4293c6dcd7"
digest = "1:4d2822cfcdf270183cee220e79e7bba55d5214a9c2bfa9b1fd6c6daaf5016eda"
name = "code.gitea.io/sdk"
packages = ["gitea"]
pruneopts = "NUT"
revision = "4f96d9ac89886e78c50de8c835ebe87461578a5e"
revision = "59ddbdc4be1423ab3d5f30b859193ac0308df147"
[[projects]]
digest = "1:3fcef06a1a6561955c94af6c7757a6fa37605eb653f0d06ab960e5bb80092195"
@ -234,6 +234,21 @@
pruneopts = "NUT"
revision = "57eb5e1fc594ad4b0b1dbea7b286d299e0cb43c2"
[[projects]]
digest = "1:b498b36dbb2b306d1c5205ee5236c9e60352be8f9eea9bf08186723a9f75b4f3"
name = "github.com/emirpasic/gods"
packages = [
"containers",
"lists",
"lists/arraylist",
"trees",
"trees/binaryheap",
"utils",
]
pruneopts = "NUT"
revision = "1615341f118ae12f353cc8a983f35b584342c9b3"
version = "v1.12.0"
[[projects]]
digest = "1:8603f74d35c93b37c615a02ba297be2cf2efc9ff6f1ff2b458a903990b568e48"
name = "github.com/ethantkoenig/rupture"
@ -472,6 +487,14 @@
pruneopts = "NUT"
revision = "8fb95d837f7d6db1913fecfd7bcc5333e6499596"
[[projects]]
branch = "master"
digest = "1:62fe3a7ea2050ecbd753a71889026f83d73329337ada66325cbafd5dea5f713d"
name = "github.com/jbenet/go-context"
packages = ["io"]
pruneopts = "NUT"
revision = "d14ea06fba99483203c19d92cfcd13ebe73135f4"
[[projects]]
digest = "1:6342cf70eaae592f7b8e2552037f2a9d4d16fa321c6e36f09c3bc450add2de19"
name = "github.com/kballard/go-shellquote"
@ -479,6 +502,14 @@
pruneopts = "NUT"
revision = "cd60e84ee657ff3dc51de0b4f55dd299a3e136f2"
[[projects]]
digest = "1:29e44e9481a689be0093a0033299b95741d394a97b28e0273c21afe697873a22"
name = "github.com/kevinburke/ssh_config"
packages = ["."]
pruneopts = "NUT"
revision = "81db2a75821ed34e682567d48be488a1c3121088"
version = "0.5"
[[projects]]
digest = "1:b32126992771fddadf6a778fe7ab29150665ed78f31ce4eb550a9db3bc0e650c"
name = "github.com/keybase/go-crypto"
@ -605,6 +636,14 @@
pruneopts = "NUT"
revision = "f77f16ffc87a6a58814e64ae72d55f9c41374e6d"
[[projects]]
digest = "1:a4df73029d2c42fabcb6b41e327d2f87e685284ec03edf76921c267d9cfc9c23"
name = "github.com/mitchellh/go-homedir"
packages = ["."]
pruneopts = "NUT"
revision = "ae18d6b8b3205b561c79e8e5f69bff09736185f4"
version = "v1.0.0"
[[projects]]
digest = "1:c7dc71a7e144df03332152d730f9c5ae22cf1cfd55454cb001ba8ffcb78aa7f0"
name = "github.com/mrjones/oauth"
@ -634,6 +673,14 @@
pruneopts = "NUT"
revision = "891127d8d1b52734debe1b3c3d7e747502b6c366"
[[projects]]
digest = "1:cf254277d898b713195cc6b4a3fac8bf738b9f1121625df27843b52b267eec6c"
name = "github.com/pelletier/go-buffruneio"
packages = ["."]
pruneopts = "NUT"
revision = "c37440a7cf42ac63b919c752ca73a85067e05992"
version = "v0.2.0"
[[projects]]
digest = "1:44c66ad69563dbe3f8e76d7d6cad21a03626e53f1875b5ab163ded419e01ca7a"
name = "github.com/philhofer/fwd"
@ -735,6 +782,19 @@
pruneopts = "NUT"
revision = "1dba4b3954bc059efc3991ec364f9f9a35f597d2"
[[projects]]
digest = "1:89fd77d603a74a6540d60067debad9397865bf040955d907362c95d364baeba6"
name = "github.com/src-d/gcfg"
packages = [
".",
"scanner",
"token",
"types",
]
pruneopts = "NUT"
revision = "1ac3a1ac202429a54835fe8408a92880156b489d"
version = "v1.4.0"
[[projects]]
branch = "master"
digest = "1:69177343ca227319b4580441a67d9d889e9ac7fcbfb89fbaa36d3283e6ab0139"
@ -782,6 +842,14 @@
pruneopts = "NUT"
revision = "8ce1146b8621c95164efd9c8b1124cfa9b8afb4e"
[[projects]]
digest = "1:3148cb3478c26a92b4c1a18abb9428234b281e278af6267840721a24b6cbc6a3"
name = "github.com/xanzy/ssh-agent"
packages = ["."]
pruneopts = "NUT"
revision = "640f0ab560aeb89d523bb6ac322b1244d5c3796c"
version = "v0.2.0"
[[projects]]
digest = "1:27d050258a4b19ca3b7a1bf26f4a04c5c66bbf0670b346ee509ebb0ad82257a6"
name = "github.com/yohcop/openid-go"
@ -790,19 +858,28 @@
revision = "2c050d2dae5345c417db301f11fda6fbf5ad0f0a"
[[projects]]
digest = "1:e4ea859df4986eb46feebbb84a2d163a4a314e87668177ca13b3b0adecaf50e8"
digest = "1:c3d6b9e2cf3936ba9927da2e8858651aad69890b9dd3349f1316b4003b25d7a3"
name = "golang.org/x/crypto"
packages = [
"acme",
"acme/autocert",
"cast5",
"curve25519",
"ed25519",
"ed25519/internal/edwards25519",
"internal/chacha20",
"md4",
"openpgp",
"openpgp/armor",
"openpgp/elgamal",
"openpgp/errors",
"openpgp/packet",
"openpgp/s2k",
"pbkdf2",
"poly1305",
"ssh",
"ssh/agent",
"ssh/knownhosts",
]
pruneopts = "NUT"
revision = "12dd70caea0268ac0d6c2707d0611ef601e7c64e"
@ -951,6 +1028,69 @@
revision = "e6179049628164864e6e84e973cfb56335748dea"
version = "v2.3.2"
[[projects]]
digest = "1:1cf1388ec8c73b7ecc711d9f279ab631ea0a6964d1ccc32809a6be90c33fa2a0"
name = "gopkg.in/src-d/go-billy.v4"
packages = [
".",
"helper/chroot",
"helper/polyfill",
"osfs",
"util",
]
pruneopts = "NUT"
revision = "982626487c60a5252e7d0b695ca23fb0fa2fd670"
version = "v4.3.0"
[[projects]]
digest = "1:8a0efb153cc5b7e0e129d716834217be483e2b326e72f3dcca8b03cd3207e9e4"
name = "gopkg.in/src-d/go-git.v4"
packages = [
".",
"config",
"internal/revision",
"plumbing",
"plumbing/cache",
"plumbing/filemode",
"plumbing/format/config",
"plumbing/format/diff",
"plumbing/format/gitignore",
"plumbing/format/idxfile",
"plumbing/format/index",
"plumbing/format/objfile",
"plumbing/format/packfile",
"plumbing/format/pktline",
"plumbing/object",
"plumbing/protocol/packp",
"plumbing/protocol/packp/capability",
"plumbing/protocol/packp/sideband",
"plumbing/revlist",
"plumbing/storer",
"plumbing/transport",
"plumbing/transport/client",
"plumbing/transport/file",
"plumbing/transport/git",
"plumbing/transport/http",
"plumbing/transport/internal/common",
"plumbing/transport/server",
"plumbing/transport/ssh",
"storage",
"storage/filesystem",
"storage/filesystem/dotgit",
"storage/memory",
"utils/binary",
"utils/diff",
"utils/ioutil",
"utils/merkletrie",
"utils/merkletrie/filesystem",
"utils/merkletrie/index",
"utils/merkletrie/internal/frame",
"utils/merkletrie/noder",
]
pruneopts = "NUT"
revision = "f62cd8e3495579a8323455fa0c4e6c44bb0d5e09"
version = "v4.8.0"
[[projects]]
digest = "1:9c541fc507676a69ea8aaed1af53278a5241d26ce0f192c993fec2ac5b78f795"
name = "gopkg.in/testfixtures.v2"
@ -959,6 +1099,14 @@
revision = "fa3fb89109b0b31957a5430cef3e93e535de362b"
version = "v2.5.0"
[[projects]]
digest = "1:b233ad4ec87ac916e7bf5e678e98a2cb9e8b52f6de6ad3e11834fc7a71b8e3bf"
name = "gopkg.in/warnings.v0"
packages = ["."]
pruneopts = "NUT"
revision = "ec4a0fea49c7b46c2aeb0b51aac55779c607e52b"
version = "v0.1.2"
[[projects]]
digest = "1:ad6f94355d292690137613735965bd3688844880fdab90eccf66321910344942"
name = "gopkg.in/yaml.v2"


@ -0,0 +1,34 @@
// Copyright 2018 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package integrations
import (
"net/http"
"testing"
"code.gitea.io/gitea/models"
)
func TestAPIReposGitRefs(t *testing.T) {
prepareTestEnv(t)
user := models.AssertExistsAndLoadBean(t, &models.User{ID: 2}).(*models.User)
// Login as User2.
session := loginUser(t, user.Name)
token := getTokenForLoggedInUser(t, session)
for _, ref := range [...]string{
"refs/heads/master", // Branch
"refs/tags/v1.1", // Tag
} {
req := NewRequestf(t, "GET", "/api/v1/repos/%s/repo1/git/%s?token="+token, user.Name, ref)
session.MakeRequest(t, req, http.StatusOK)
}
// Test getting all refs
req := NewRequestf(t, "GET", "/api/v1/repos/%s/repo1/git/refs?token="+token, user.Name)
session.MakeRequest(t, req, http.StatusOK)
// Test getting non-existent refs
req = NewRequestf(t, "GET", "/api/v1/repos/%s/repo1/git/refs/heads/unknown?token="+token, user.Name)
session.MakeRequest(t, req, http.StatusNotFound)
}


@ -573,6 +573,10 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Get("/status", repo.GetCombinedCommitStatusByRef)
m.Get("/statuses", repo.GetCommitStatusesByRef)
})
m.Group("/git", func() {
m.Get("/refs", repo.GetGitAllRefs)
m.Get("/refs/*", repo.GetGitRefs)
})
}, repoAssignment())
})


@ -0,0 +1,115 @@
// Copyright 2018 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repo
import (
"code.gitea.io/gitea/modules/context"
"code.gitea.io/git"
api "code.gitea.io/sdk/gitea"
)
// GetGitAllRefs gets a ref or lists all the refs of a repository
func GetGitAllRefs(ctx *context.APIContext) {
// swagger:operation GET /repos/{owner}/{repo}/git/refs repository repoListAllGitRefs
// ---
// summary: Get specified ref or filtered repository's refs
// produces:
// - application/json
// parameters:
// - name: owner
// in: path
// description: owner of the repo
// type: string
// required: true
// - name: repo
// in: path
// description: name of the repo
// type: string
// required: true
// responses:
// "200":
// "$ref": "#/responses/Reference"
// "$ref": "#/responses/ReferenceList"
// "404":
// "$ref": "#/responses/notFound"
getGitRefsInternal(ctx, "")
}
// GetGitRefs gets a ref or a filtered list of refs of a repository
func GetGitRefs(ctx *context.APIContext) {
// swagger:operation GET /repos/{owner}/{repo}/git/refs/{ref} repository repoListGitRefs
// ---
// summary: Get specified ref or filtered repository's refs
// produces:
// - application/json
// parameters:
// - name: owner
// in: path
// description: owner of the repo
// type: string
// required: true
// - name: repo
// in: path
// description: name of the repo
// type: string
// required: true
// - name: ref
// in: path
// description: part or full name of the ref
// type: string
// required: true
// responses:
// "200":
// "$ref": "#/responses/Reference"
// "$ref": "#/responses/ReferenceList"
// "404":
// "$ref": "#/responses/notFound"
getGitRefsInternal(ctx, ctx.Params("*"))
}
func getGitRefsInternal(ctx *context.APIContext, filter string) {
gitRepo, err := git.OpenRepository(ctx.Repo.Repository.RepoPath())
if err != nil {
ctx.Error(500, "OpenRepository", err)
return
}
if len(filter) > 0 {
filter = "refs/" + filter
}
refs, err := gitRepo.GetRefsFiltered(filter)
if err != nil {
ctx.Error(500, "GetRefsFiltered", err)
return
}
if len(refs) == 0 {
ctx.Status(404)
return
}
apiRefs := make([]*api.Reference, len(refs))
for i := range refs {
apiRefs[i] = &api.Reference{
Ref: refs[i].Name,
URL: ctx.Repo.Repository.APIURL() + "/git/" + refs[i].Name,
Object: &api.GitObject{
SHA: refs[i].Object.String(),
Type: refs[i].Type,
// TODO: Add commit/tag info URL
//URL: ctx.Repo.Repository.APIURL() + "/git/" + refs[i].Type + "s/" + refs[i].Object.String(),
},
}
}
// If a single reference is found and it matches the filter exactly, return it as an object
if len(apiRefs) == 1 && apiRefs[0].Ref == filter {
ctx.JSON(200, &apiRefs[0])
return
}
ctx.JSON(200, &apiRefs)
}
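For illustration only (not part of this commit): a minimal Go client sketch that exercises the handlers above. The host, owner, repo and token are placeholders, and the local struct types simply mirror the Reference/GitObject payload that the SDK defines later in this change. Note that when the filter matches exactly one ref by its full name, the handler returns a single object rather than an array.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// gitObject and reference mirror the api.GitObject / api.Reference payload
// produced by the handlers above (fields sha/type/url and ref/url/object).
type gitObject struct {
	Type string `json:"type"`
	SHA  string `json:"sha"`
	URL  string `json:"url"`
}

type reference struct {
	Ref    string     `json:"ref"`
	URL    string     `json:"url"`
	Object *gitObject `json:"object"`
}

func main() {
	// Placeholder host, owner, repo and token.
	url := "https://gitea.example.com/api/v1/repos/owner/repo/git/refs?token=TOKEN"
	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Listing and prefix filtering return a JSON array; an exact single match
	// (e.g. .../git/refs/heads/master) comes back as a single object instead.
	var refs []*reference
	if err := json.NewDecoder(resp.Body).Decode(&refs); err != nil {
		panic(err)
	}
	for _, r := range refs {
		fmt.Println(r.Ref, r.Object.Type, r.Object.SHA)
	}
}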


@ -36,6 +36,20 @@ type swaggerResponseBranchList struct {
Body []api.Branch `json:"body"`
}
// Reference
// swagger:response Reference
type swaggerResponseReference struct {
// in:body
Body api.Reference `json:"body"`
}
// ReferenceList
// swagger:response ReferenceList
type swaggerResponseReferenceList struct {
// in:body
Body []api.Reference `json:"body"`
}
// Hook
// swagger:response Hook
type swaggerResponseHook struct {


@ -1560,6 +1560,85 @@
}
}
},
"/repos/{owner}/{repo}/git/refs": {
"get": {
"produces": [
"application/json"
],
"tags": [
"repository"
],
"summary": "Get specified ref or filtered repository's refs",
"operationId": "repoListAllGitRefs",
"parameters": [
{
"type": "string",
"description": "owner of the repo",
"name": "owner",
"in": "path",
"required": true
},
{
"type": "string",
"description": "name of the repo",
"name": "repo",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"$ref": "#/responses/ReferenceList"
},
"404": {
"$ref": "#/responses/notFound"
}
}
}
},
"/repos/{owner}/{repo}/git/refs/{ref}": {
"get": {
"produces": [
"application/json"
],
"tags": [
"repository"
],
"summary": "Get specified ref or filtered repository's refs",
"operationId": "repoListGitRefs",
"parameters": [
{
"type": "string",
"description": "owner of the repo",
"name": "owner",
"in": "path",
"required": true
},
{
"type": "string",
"description": "name of the repo",
"name": "repo",
"in": "path",
"required": true
},
{
"type": "string",
"description": "part or full name of the ref",
"name": "ref",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"$ref": "#/responses/ReferenceList"
},
"404": {
"$ref": "#/responses/notFound"
}
}
}
},
"/repos/{owner}/{repo}/hooks": {
"get": {
"produces": [
@ -6937,6 +7016,25 @@
},
"x-go-package": "code.gitea.io/gitea/vendor/code.gitea.io/sdk/gitea"
},
"GitObject": {
"type": "object",
"title": "GitObject represents a Git object.",
"properties": {
"sha": {
"type": "string",
"x-go-name": "SHA"
},
"type": {
"type": "string",
"x-go-name": "Type"
},
"url": {
"type": "string",
"x-go-name": "URL"
}
},
"x-go-package": "code.gitea.io/gitea/vendor/code.gitea.io/sdk/gitea"
},
"Issue": {
"description": "Issue represents an issue in a repository",
"type": "object",
@ -7526,6 +7624,24 @@
},
"x-go-package": "code.gitea.io/gitea/vendor/code.gitea.io/sdk/gitea"
},
"Reference": {
"type": "object",
"title": "Reference represents a Git reference.",
"properties": {
"object": {
"$ref": "#/definitions/GitObject"
},
"ref": {
"type": "string",
"x-go-name": "Ref"
},
"url": {
"type": "string",
"x-go-name": "URL"
}
},
"x-go-package": "code.gitea.io/gitea/vendor/code.gitea.io/sdk/gitea"
},
"Release": {
"description": "Release represents a repository release",
"type": "object",
@ -8177,6 +8293,21 @@
}
}
},
"Reference": {
"description": "Reference",
"schema": {
"$ref": "#/definitions/Reference"
}
},
"ReferenceList": {
"description": "ReferenceList",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/Reference"
}
}
},
"Release": {
"description": "Release",
"schema": {

vendor/code.gitea.io/git/ref.go (generated, vendored, new file, +18 lines)

@ -0,0 +1,18 @@
// Copyright 2018 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package git
// Reference represents a Git ref.
type Reference struct {
Name string
repo *Repository
Object SHA1 // The id of this commit object
Type string
}
// Commit returns the commit of the reference
func (ref *Reference) Commit() (*Commit, error) {
return ref.repo.getCommit(ref.Object)
}


@ -1,4 +1,5 @@
// Copyright 2015 The Gogs Authors. All rights reserved.
// Copyright 2018 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
@ -7,6 +8,9 @@ package git
import (
"fmt"
"strings"
"gopkg.in/src-d/go-git.v4"
"gopkg.in/src-d/go-git.v4/plumbing"
)
// BranchPrefix base dir of the branch information file store on git
@ -60,16 +64,23 @@ func (repo *Repository) SetDefaultBranch(name string) error {
// GetBranches returns all branches of the repository.
func (repo *Repository) GetBranches() ([]string, error) {
stdout, err := NewCommand("for-each-ref", "--format=%(refname)", BranchPrefix).RunInDir(repo.Path)
r, err := git.PlainOpen(repo.Path)
if err != nil {
return nil, err
}
refs := strings.Split(stdout, "\n")
branches := make([]string, len(refs)-1)
for i, ref := range refs[:len(refs)-1] {
branches[i] = strings.TrimPrefix(ref, BranchPrefix)
branchIter, err := r.Branches()
if err != nil {
return nil, err
}
branches := make([]string, 0)
if err = branchIter.ForEach(func(branch *plumbing.Reference) error {
branches = append(branches, branch.Name().Short())
return nil
}); err != nil {
return nil, err
}
return branches, nil
}

vendor/code.gitea.io/git/repo_ref.go (generated, vendored, new file, +51 lines)

@ -0,0 +1,51 @@
// Copyright 2018 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package git
import (
"strings"
"gopkg.in/src-d/go-git.v4"
"gopkg.in/src-d/go-git.v4/plumbing"
)
// GetRefs returns all references of the repository.
func (repo *Repository) GetRefs() ([]*Reference, error) {
return repo.GetRefsFiltered("")
}
// GetRefsFiltered returns all references of the repository whose names match the given pattern exactly or start with it.
func (repo *Repository) GetRefsFiltered(pattern string) ([]*Reference, error) {
r, err := git.PlainOpen(repo.Path)
if err != nil {
return nil, err
}
refsIter, err := r.References()
if err != nil {
return nil, err
}
refs := make([]*Reference, 0)
if err = refsIter.ForEach(func(ref *plumbing.Reference) error {
if ref.Name() != plumbing.HEAD && !ref.Name().IsRemote() &&
(pattern == "" || strings.HasPrefix(ref.Name().String(), pattern)) {
r := &Reference{
Name: ref.Name().String(),
Object: SHA1(ref.Hash()),
Type: string(ObjectCommit),
repo: repo,
}
if ref.Name().IsTag() {
r.Type = string(ObjectTag)
}
refs = append(refs, r)
}
return nil
}); err != nil {
return nil, err
}
return refs, nil
}
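A hedged usage sketch (not part of this commit) of the helpers above, the same way the API handler earlier in this change uses them; the repository path is a placeholder.

package main

import (
	"fmt"

	"code.gitea.io/git"
)

func main() {
	// Placeholder path to a repository on disk.
	repo, err := git.OpenRepository("/path/to/repo.git")
	if err != nil {
		panic(err)
	}

	// Branch refs only; GetRefsFiltered("") (or GetRefs()) returns every ref
	// except HEAD and remote-tracking references.
	refs, err := repo.GetRefsFiltered("refs/heads/")
	if err != nil {
		panic(err)
	}
	for _, ref := range refs {
		fmt.Println(ref.Name, ref.Type, ref.Object.String())
	}
}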

vendor/code.gitea.io/sdk/gitea/repo_refs.go (generated, vendored, new file, +69 lines)

@ -0,0 +1,69 @@
// Copyright 2018 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package gitea
import (
"encoding/json"
"errors"
"fmt"
"strings"
)
// Reference represents a Git reference.
type Reference struct {
Ref string `json:"ref"`
URL string `json:"url"`
Object *GitObject `json:"object"`
}
// GitObject represents a Git object.
type GitObject struct {
Type string `json:"type"`
SHA string `json:"sha"`
URL string `json:"url"`
}
// GetRepoRef returns information about a single ref of a repository
func (c *Client) GetRepoRef(user, repo, ref string) (*Reference, error) {
ref = strings.TrimPrefix(ref, "refs/")
r := new(Reference)
err := c.getParsedResponse("GET", fmt.Sprintf("/repos/%s/%s/git/refs/%s", user, repo, ref), nil, nil, &r)
if _, ok := err.(*json.UnmarshalTypeError); ok {
// Multiple refs
return nil, errors.New("no exact match found for this ref")
} else if err != nil {
return nil, err
}
return r, nil
}
// GetRepoRefs returns a list of refs of a repository
func (c *Client) GetRepoRefs(user, repo, ref string) ([]*Reference, error) {
ref = strings.TrimPrefix(ref, "refs/")
resp, err := c.getResponse("GET", fmt.Sprintf("/repos/%s/%s/git/refs/%s", user, repo, ref), nil, nil)
if err != nil {
return nil, err
}
// Attempt to unmarshal single returned ref.
r := new(Reference)
refErr := json.Unmarshal(resp, r)
if refErr == nil {
return []*Reference{r}, nil
}
// Attempt to unmarshal multiple refs.
var rs []*Reference
refsErr := json.Unmarshal(resp, &rs)
if refsErr == nil {
if len(rs) == 0 {
return nil, errors.New("unexpected response: an array of refs with length 0")
}
return rs, nil
}
return nil, fmt.Errorf("unmarshalling failed for both single and multiple refs: %s and %s", refErr, refsErr)
}
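A rough sketch of the new SDK calls (illustration only, not part of this commit); the server URL and token are placeholders, and gitea.NewClient is assumed to be the existing SDK constructor.

package main

import (
	"fmt"

	"code.gitea.io/sdk/gitea"
)

func main() {
	// Placeholder server URL and token; NewClient is assumed from the existing SDK.
	client := gitea.NewClient("https://gitea.example.com", "TOKEN")

	// All branch heads of owner/repo (a leading "refs/" would be trimmed automatically).
	refs, err := client.GetRepoRefs("owner", "repo", "heads")
	if err != nil {
		panic(err)
	}
	for _, r := range refs {
		fmt.Printf("%s -> %s (%s)\n", r.Ref, r.Object.SHA, r.Object.Type)
	}

	// A single, exactly matching ref.
	master, err := client.GetRepoRef("owner", "repo", "heads/master")
	if err != nil {
		panic(err)
	}
	fmt.Println(master.Object.SHA)
}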

vendor/github.com/emirpasic/gods/LICENSE (generated, vendored, new file, +41 lines)

@ -0,0 +1,41 @@
Copyright (c) 2015, Emir Pasic
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-------------------------------------------------------------------------------
AVL Tree:
Copyright (c) 2017 Benjamin Scher Purcell <benjapurcell@gmail.com>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.


@ -0,0 +1,35 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package containers provides core interfaces and functions for data structures.
//
// Container is the base interface for all data structures to implement.
//
// Iterators provide stateful iterators.
//
// Enumerable provides Ruby inspired (each, select, map, find, any?, etc.) container functions.
//
// Serialization provides serializers (marshalers) and deserializers (unmarshalers).
package containers
import "github.com/emirpasic/gods/utils"
// Container is base interface that all data structures implement.
type Container interface {
Empty() bool
Size() int
Clear()
Values() []interface{}
}
// GetSortedValues returns the container's elements sorted with respect to the passed comparator.
// Does not affect the ordering of elements within the container.
func GetSortedValues(container Container, comparator utils.Comparator) []interface{} {
values := container.Values()
if len(values) < 2 {
return values
}
utils.Sort(values, comparator)
return values
}


@ -0,0 +1,61 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package containers
// EnumerableWithIndex provides functions for ordered containers whose values can be fetched by an index.
type EnumerableWithIndex interface {
// Each calls the given function once for each element, passing that element's index and value.
Each(func(index int, value interface{}))
// Map invokes the given function once for each element and returns a
// container containing the values returned by the given function.
// TODO would appreciate help on how to enforce this in containers (don't want to type assert when chaining)
// Map(func(index int, value interface{}) interface{}) Container
// Select returns a new container containing all elements for which the given function returns a true value.
// TODO need help on how to enforce this in containers (don't want to type assert when chaining)
// Select(func(index int, value interface{}) bool) Container
// Any passes each element of the container to the given function and
// returns true if the function ever returns true for any element.
Any(func(index int, value interface{}) bool) bool
// All passes each element of the container to the given function and
// returns true if the function returns true for all elements.
All(func(index int, value interface{}) bool) bool
// Find passes each element of the container to the given function and returns
// the first (index,value) for which the function is true or -1,nil otherwise
// if no element matches the criteria.
Find(func(index int, value interface{}) bool) (int, interface{})
}
// EnumerableWithKey provides functions for ordered containers whose elements are key/value pairs.
type EnumerableWithKey interface {
// Each calls the given function once for each element, passing that element's key and value.
Each(func(key interface{}, value interface{}))
// Map invokes the given function once for each element and returns a container
// containing the values returned by the given function as key/value pairs.
// TODO need help on how to enforce this in containers (don't want to type assert when chaining)
// Map(func(key interface{}, value interface{}) (interface{}, interface{})) Container
// Select returns a new container containing all elements for which the given function returns a true value.
// TODO need help on how to enforce this in containers (don't want to type assert when chaining)
// Select(func(key interface{}, value interface{}) bool) Container
// Any passes each element of the container to the given function and
// returns true if the function ever returns true for any element.
Any(func(key interface{}, value interface{}) bool) bool
// All passes each element of the container to the given function and
// returns true if the function returns true for all elements.
All(func(key interface{}, value interface{}) bool) bool
// Find passes each element of the container to the given function and returns
// the first (key,value) for which the function is true or nil,nil otherwise if no element
// matches the criteria.
Find(func(key interface{}, value interface{}) bool) (interface{}, interface{})
}

vendor/github.com/emirpasic/gods/containers/iterator.go (generated, vendored, new file, +109 lines)

@ -0,0 +1,109 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package containers
// IteratorWithIndex is a stateful iterator for ordered containers whose values can be fetched by an index.
type IteratorWithIndex interface {
// Next moves the iterator to the next element and returns true if there was a next element in the container.
// If Next() returns true, then next element's index and value can be retrieved by Index() and Value().
// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
// Modifies the state of the iterator.
Next() bool
// Value returns the current element's value.
// Does not modify the state of the iterator.
Value() interface{}
// Index returns the current element's index.
// Does not modify the state of the iterator.
Index() int
// Begin resets the iterator to its initial state (one-before-first)
// Call Next() to fetch the first element if any.
Begin()
// First moves the iterator to the first element and returns true if there was a first element in the container.
// If First() returns true, then first element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
First() bool
}
// IteratorWithKey is a stateful iterator for ordered containers whose elements are key value pairs.
type IteratorWithKey interface {
// Next moves the iterator to the next element and returns true if there was a next element in the container.
// If Next() returns true, then next element's key and value can be retrieved by Key() and Value().
// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
// Modifies the state of the iterator.
Next() bool
// Value returns the current element's value.
// Does not modify the state of the iterator.
Value() interface{}
// Key returns the current element's key.
// Does not modify the state of the iterator.
Key() interface{}
// Begin resets the iterator to its initial state (one-before-first)
// Call Next() to fetch the first element if any.
Begin()
// First moves the iterator to the first element and returns true if there was a first element in the container.
// If First() returns true, then first element's key and value can be retrieved by Key() and Value().
// Modifies the state of the iterator.
First() bool
}
// ReverseIteratorWithIndex is a stateful iterator for ordered containers whose values can be fetched by an index.
//
// Essentially it is the same as IteratorWithIndex, but provides additional:
//
// Prev() function to enable traversal in reverse
//
// Last() function to move the iterator to the last element.
//
// End() function to move the iterator past the last element (one-past-the-end).
type ReverseIteratorWithIndex interface {
// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
Prev() bool
// End moves the iterator past the last element (one-past-the-end).
// Call Prev() to fetch the last element if any.
End()
// Last moves the iterator to the last element and returns true if there was a last element in the container.
// If Last() returns true, then last element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
Last() bool
IteratorWithIndex
}
// ReverseIteratorWithKey is a stateful iterator for ordered containers whose elements are key value pairs.
//
// Essentially it is the same as IteratorWithKey, but provides additional:
//
// Prev() function to enable traversal in reverse
//
// Last() function to move the iterator to the last element.
type ReverseIteratorWithKey interface {
// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
// If Prev() returns true, then previous element's key and value can be retrieved by Key() and Value().
// Modifies the state of the iterator.
Prev() bool
// End moves the iterator past the last element (one-past-the-end).
// Call Prev() to fetch the last element if any.
End()
// Last moves the iterator to the last element and returns true if there was a last element in the container.
// If Last() returns true, then last element's key and value can be retrieved by Key() and Value().
// Modifies the state of the iterator.
Last() bool
IteratorWithKey
}


@ -0,0 +1,17 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package containers
// JSONSerializer provides JSON serialization
type JSONSerializer interface {
// ToJSON outputs the JSON representation of the container's elements.
ToJSON() ([]byte, error)
}
// JSONDeserializer provides JSON deserialization
type JSONDeserializer interface {
// FromJSON populates the container's elements from the input JSON representation.
FromJSON([]byte) error
}


@ -0,0 +1,228 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package arraylist implements the array list.
//
// Structure is not thread safe.
//
// Reference: https://en.wikipedia.org/wiki/List_%28abstract_data_type%29
package arraylist
import (
"fmt"
"strings"
"github.com/emirpasic/gods/lists"
"github.com/emirpasic/gods/utils"
)
func assertListImplementation() {
var _ lists.List = (*List)(nil)
}
// List holds the elements in a slice
type List struct {
elements []interface{}
size int
}
const (
growthFactor = float32(2.0) // growth by 100%
shrinkFactor = float32(0.25) // shrink when size is 25% of capacity (0 means never shrink)
)
// New instantiates a new list and adds the passed values, if any, to the list
func New(values ...interface{}) *List {
list := &List{}
if len(values) > 0 {
list.Add(values...)
}
return list
}
// Add appends a value at the end of the list
func (list *List) Add(values ...interface{}) {
list.growBy(len(values))
for _, value := range values {
list.elements[list.size] = value
list.size++
}
}
// Get returns the element at index.
// Second return parameter is true if index is within bounds of the array and array is not empty, otherwise false.
func (list *List) Get(index int) (interface{}, bool) {
if !list.withinRange(index) {
return nil, false
}
return list.elements[index], true
}
// Remove removes the element at the given index from the list.
func (list *List) Remove(index int) {
if !list.withinRange(index) {
return
}
list.elements[index] = nil // cleanup reference
copy(list.elements[index:], list.elements[index+1:list.size]) // shift to the left by one (slow operation, need ways to optimize this)
list.size--
list.shrink()
}
// Contains checks if elements (one or more) are present in the set.
// All elements have to be present in the set for the method to return true.
// Performance time complexity of n^2.
// Returns true if no arguments are passed at all, i.e. set is always super-set of empty set.
func (list *List) Contains(values ...interface{}) bool {
for _, searchValue := range values {
found := false
for _, element := range list.elements {
if element == searchValue {
found = true
break
}
}
if !found {
return false
}
}
return true
}
// Values returns all elements in the list.
func (list *List) Values() []interface{} {
newElements := make([]interface{}, list.size, list.size)
copy(newElements, list.elements[:list.size])
return newElements
}
// IndexOf returns the index of the provided element, or -1 if it is not present.
func (list *List) IndexOf(value interface{}) int {
if list.size == 0 {
return -1
}
for index, element := range list.elements {
if element == value {
return index
}
}
return -1
}
// Empty returns true if list does not contain any elements.
func (list *List) Empty() bool {
return list.size == 0
}
// Size returns number of elements within the list.
func (list *List) Size() int {
return list.size
}
// Clear removes all elements from the list.
func (list *List) Clear() {
list.size = 0
list.elements = []interface{}{}
}
// Sort sorts values (in-place) using the given comparator.
func (list *List) Sort(comparator utils.Comparator) {
if len(list.elements) < 2 {
return
}
utils.Sort(list.elements[:list.size], comparator)
}
// Swap swaps the two values at the specified positions.
func (list *List) Swap(i, j int) {
if list.withinRange(i) && list.withinRange(j) {
list.elements[i], list.elements[j] = list.elements[j], list.elements[i]
}
}
// Insert inserts values at specified index position shifting the value at that position (if any) and any subsequent elements to the right.
// Does not do anything if position is negative or bigger than list's size
// Note: position equal to list's size is valid, i.e. append.
func (list *List) Insert(index int, values ...interface{}) {
if !list.withinRange(index) {
// Append
if index == list.size {
list.Add(values...)
}
return
}
l := len(values)
list.growBy(l)
list.size += l
copy(list.elements[index+l:], list.elements[index:list.size-l])
copy(list.elements[index:], values)
}
// Set the value at specified index
// Does not do anything if position is negative or bigger than list's size
// Note: position equal to list's size is valid, i.e. append.
func (list *List) Set(index int, value interface{}) {
if !list.withinRange(index) {
// Append
if index == list.size {
list.Add(value)
}
return
}
list.elements[index] = value
}
// String returns a string representation of container
func (list *List) String() string {
str := "ArrayList\n"
values := []string{}
for _, value := range list.elements[:list.size] {
values = append(values, fmt.Sprintf("%v", value))
}
str += strings.Join(values, ", ")
return str
}
// Check that the index is within bounds of the list
func (list *List) withinRange(index int) bool {
return index >= 0 && index < list.size
}
func (list *List) resize(cap int) {
newElements := make([]interface{}, cap, cap)
copy(newElements, list.elements)
list.elements = newElements
}
// Expand the array if necessary, i.e. capacity will be reached if we add n elements
func (list *List) growBy(n int) {
// When capacity is reached, grow by a factor of growthFactor and add number of elements
currentCapacity := cap(list.elements)
if list.size+n >= currentCapacity {
newCapacity := int(growthFactor * float32(currentCapacity+n))
list.resize(newCapacity)
}
}
// Shrink the array if necessary, i.e. when size is shrinkFactor percent of current capacity
func (list *List) shrink() {
if shrinkFactor == 0.0 {
return
}
// Shrink when size is at shrinkFactor * capacity
currentCapacity := cap(list.elements)
if list.size <= int(float32(currentCapacity)*shrinkFactor) {
list.resize(list.size)
}
}


@ -0,0 +1,79 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package arraylist
import "github.com/emirpasic/gods/containers"
func assertEnumerableImplementation() {
var _ containers.EnumerableWithIndex = (*List)(nil)
}
// Each calls the given function once for each element, passing that element's index and value.
func (list *List) Each(f func(index int, value interface{})) {
iterator := list.Iterator()
for iterator.Next() {
f(iterator.Index(), iterator.Value())
}
}
// Map invokes the given function once for each element and returns a
// container containing the values returned by the given function.
func (list *List) Map(f func(index int, value interface{}) interface{}) *List {
newList := &List{}
iterator := list.Iterator()
for iterator.Next() {
newList.Add(f(iterator.Index(), iterator.Value()))
}
return newList
}
// Select returns a new container containing all elements for which the given function returns a true value.
func (list *List) Select(f func(index int, value interface{}) bool) *List {
newList := &List{}
iterator := list.Iterator()
for iterator.Next() {
if f(iterator.Index(), iterator.Value()) {
newList.Add(iterator.Value())
}
}
return newList
}
// Any passes each element of the collection to the given function and
// returns true if the function ever returns true for any element.
func (list *List) Any(f func(index int, value interface{}) bool) bool {
iterator := list.Iterator()
for iterator.Next() {
if f(iterator.Index(), iterator.Value()) {
return true
}
}
return false
}
// All passes each element of the collection to the given function and
// returns true if the function returns true for all elements.
func (list *List) All(f func(index int, value interface{}) bool) bool {
iterator := list.Iterator()
for iterator.Next() {
if !f(iterator.Index(), iterator.Value()) {
return false
}
}
return true
}
// Find passes each element of the container to the given function and returns
// the first (index,value) for which the function is true or -1,nil otherwise
// if no element matches the criteria.
func (list *List) Find(f func(index int, value interface{}) bool) (int, interface{}) {
iterator := list.Iterator()
for iterator.Next() {
if f(iterator.Index(), iterator.Value()) {
return iterator.Index(), iterator.Value()
}
}
return -1, nil
}


@ -0,0 +1,83 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package arraylist
import "github.com/emirpasic/gods/containers"
func assertIteratorImplementation() {
var _ containers.ReverseIteratorWithIndex = (*Iterator)(nil)
}
// Iterator holding the iterator's state
type Iterator struct {
list *List
index int
}
// Iterator returns a stateful iterator whose values can be fetched by an index.
func (list *List) Iterator() Iterator {
return Iterator{list: list, index: -1}
}
// Next moves the iterator to the next element and returns true if there was a next element in the container.
// If Next() returns true, then next element's index and value can be retrieved by Index() and Value().
// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
// Modifies the state of the iterator.
func (iterator *Iterator) Next() bool {
if iterator.index < iterator.list.size {
iterator.index++
}
return iterator.list.withinRange(iterator.index)
}
// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
func (iterator *Iterator) Prev() bool {
if iterator.index >= 0 {
iterator.index--
}
return iterator.list.withinRange(iterator.index)
}
// Value returns the current element's value.
// Does not modify the state of the iterator.
func (iterator *Iterator) Value() interface{} {
return iterator.list.elements[iterator.index]
}
// Index returns the current element's index.
// Does not modify the state of the iterator.
func (iterator *Iterator) Index() int {
return iterator.index
}
// Begin resets the iterator to its initial state (one-before-first)
// Call Next() to fetch the first element if any.
func (iterator *Iterator) Begin() {
iterator.index = -1
}
// End moves the iterator past the last element (one-past-the-end).
// Call Prev() to fetch the last element if any.
func (iterator *Iterator) End() {
iterator.index = iterator.list.size
}
// First moves the iterator to the first element and returns true if there was a first element in the container.
// If First() returns true, then first element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
func (iterator *Iterator) First() bool {
iterator.Begin()
return iterator.Next()
}
// Last moves the iterator to the last element and returns true if there was a last element in the container.
// If Last() returns true, then last element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
func (iterator *Iterator) Last() bool {
iterator.End()
return iterator.Prev()
}


@ -0,0 +1,29 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package arraylist
import (
"encoding/json"
"github.com/emirpasic/gods/containers"
)
func assertSerializationImplementation() {
var _ containers.JSONSerializer = (*List)(nil)
var _ containers.JSONDeserializer = (*List)(nil)
}
// ToJSON outputs the JSON representation of list's elements.
func (list *List) ToJSON() ([]byte, error) {
return json.Marshal(list.elements[:list.size])
}
// FromJSON populates list's elements from the input JSON representation.
func (list *List) FromJSON(data []byte) error {
err := json.Unmarshal(data, &list.elements)
if err == nil {
list.size = len(list.elements)
}
return err
}
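To illustrate what the newly vendored arraylist package (together with the containers and utils packages above) provides, here is a small, self-contained sketch; it is not used anywhere in this commit.

package main

import (
	"fmt"

	"github.com/emirpasic/gods/containers"
	"github.com/emirpasic/gods/lists/arraylist"
	"github.com/emirpasic/gods/utils"
)

func main() {
	list := arraylist.New()
	list.Add("gamma", "alpha", "beta")

	// In-place sort using a comparator from the utils package.
	list.Sort(utils.StringComparator)

	// Each is backed by the stateful Iterator shown above.
	list.Each(func(index int, value interface{}) {
		fmt.Println(index, value)
	})

	// GetSortedValues works on any Container and does not reorder the list itself.
	fmt.Println(containers.GetSortedValues(list, utils.StringComparator))

	// JSON round trip via the serialization helpers.
	data, _ := list.ToJSON()
	fmt.Println(string(data))
}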

vendor/github.com/emirpasic/gods/lists/lists.go (generated, vendored, new file, +33 lines)

@ -0,0 +1,33 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package lists provides an abstract List interface.
//
// In computer science, a list or sequence is an abstract data type that represents an ordered sequence of values, where the same value may occur more than once. An instance of a list is a computer representation of the mathematical concept of a finite sequence; the (potentially) infinite analog of a list is a stream. Lists are a basic example of containers, as they contain other values. If the same value occurs multiple times, each occurrence is considered a distinct item.
//
// Reference: https://en.wikipedia.org/wiki/List_%28abstract_data_type%29
package lists
import (
"github.com/emirpasic/gods/containers"
"github.com/emirpasic/gods/utils"
)
// List interface that all lists implement
type List interface {
Get(index int) (interface{}, bool)
Remove(index int)
Add(values ...interface{})
Contains(values ...interface{}) bool
Sort(comparator utils.Comparator)
Swap(index1, index2 int)
Insert(index int, values ...interface{})
Set(index int, value interface{})
containers.Container
// Empty() bool
// Size() int
// Clear()
// Values() []interface{}
}


@ -0,0 +1,163 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package binaryheap implements a binary heap backed by array list.
//
// Comparator defines this heap as either min or max heap.
//
// Structure is not thread safe.
//
// References: http://en.wikipedia.org/wiki/Binary_heap
package binaryheap
import (
"fmt"
"github.com/emirpasic/gods/lists/arraylist"
"github.com/emirpasic/gods/trees"
"github.com/emirpasic/gods/utils"
"strings"
)
func assertTreeImplementation() {
var _ trees.Tree = (*Heap)(nil)
}
// Heap holds elements in an array-list
type Heap struct {
list *arraylist.List
Comparator utils.Comparator
}
// NewWith instantiates a new empty heap tree with the custom comparator.
func NewWith(comparator utils.Comparator) *Heap {
return &Heap{list: arraylist.New(), Comparator: comparator}
}
// NewWithIntComparator instantiates a new empty heap with the IntComparator, i.e. elements are of type int.
func NewWithIntComparator() *Heap {
return &Heap{list: arraylist.New(), Comparator: utils.IntComparator}
}
// NewWithStringComparator instantiates a new empty heap with the StringComparator, i.e. elements are of type string.
func NewWithStringComparator() *Heap {
return &Heap{list: arraylist.New(), Comparator: utils.StringComparator}
}
// Push adds a value onto the heap and bubbles it up accordingly.
func (heap *Heap) Push(values ...interface{}) {
if len(values) == 1 {
heap.list.Add(values[0])
heap.bubbleUp()
} else {
// Reference: https://en.wikipedia.org/wiki/Binary_heap#Building_a_heap
for _, value := range values {
heap.list.Add(value)
}
size := heap.list.Size()/2 + 1
for i := size; i >= 0; i-- {
heap.bubbleDownIndex(i)
}
}
}
// Pop removes top element on heap and returns it, or nil if heap is empty.
// Second return parameter is true, unless the heap was empty and there was nothing to pop.
func (heap *Heap) Pop() (value interface{}, ok bool) {
value, ok = heap.list.Get(0)
if !ok {
return
}
lastIndex := heap.list.Size() - 1
heap.list.Swap(0, lastIndex)
heap.list.Remove(lastIndex)
heap.bubbleDown()
return
}
// Peek returns top element on the heap without removing it, or nil if heap is empty.
// Second return parameter is true, unless the heap was empty and there was nothing to peek.
func (heap *Heap) Peek() (value interface{}, ok bool) {
return heap.list.Get(0)
}
// Empty returns true if heap does not contain any elements.
func (heap *Heap) Empty() bool {
return heap.list.Empty()
}
// Size returns number of elements within the heap.
func (heap *Heap) Size() int {
return heap.list.Size()
}
// Clear removes all elements from the heap.
func (heap *Heap) Clear() {
heap.list.Clear()
}
// Values returns all elements in the heap.
func (heap *Heap) Values() []interface{} {
return heap.list.Values()
}
// String returns a string representation of container
func (heap *Heap) String() string {
str := "BinaryHeap\n"
values := []string{}
for _, value := range heap.list.Values() {
values = append(values, fmt.Sprintf("%v", value))
}
str += strings.Join(values, ", ")
return str
}
// Performs the "bubble down" operation. This is to place the element that is at the root
// of the heap in its correct place so that the heap maintains the min/max-heap order property.
func (heap *Heap) bubbleDown() {
heap.bubbleDownIndex(0)
}
// Performs the "bubble down" operation. This is to place the element that is at the index
// of the heap in its correct place so that the heap maintains the min/max-heap order property.
func (heap *Heap) bubbleDownIndex(index int) {
size := heap.list.Size()
for leftIndex := index<<1 + 1; leftIndex < size; leftIndex = index<<1 + 1 {
rightIndex := index<<1 + 2
smallerIndex := leftIndex
leftValue, _ := heap.list.Get(leftIndex)
rightValue, _ := heap.list.Get(rightIndex)
if rightIndex < size && heap.Comparator(leftValue, rightValue) > 0 {
smallerIndex = rightIndex
}
indexValue, _ := heap.list.Get(index)
smallerValue, _ := heap.list.Get(smallerIndex)
if heap.Comparator(indexValue, smallerValue) > 0 {
heap.list.Swap(index, smallerIndex)
} else {
break
}
index = smallerIndex
}
}
// Performs the "bubble up" operation. This is to place a newly inserted
// element (i.e. last element in the list) in its correct place so that
// the heap maintains the min/max-heap order property.
func (heap *Heap) bubbleUp() {
index := heap.list.Size() - 1
for parentIndex := (index - 1) >> 1; index > 0; parentIndex = (index - 1) >> 1 {
indexValue, _ := heap.list.Get(index)
parentValue, _ := heap.list.Get(parentIndex)
if heap.Comparator(parentValue, indexValue) <= 0 {
break
}
heap.list.Swap(index, parentIndex)
index = parentIndex
}
}
// Check that the index is within bounds of the list
func (heap *Heap) withinRange(index int) bool {
return index >= 0 && index < heap.list.Size()
}


@ -0,0 +1,84 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package binaryheap
import "github.com/emirpasic/gods/containers"
func assertIteratorImplementation() {
var _ containers.ReverseIteratorWithIndex = (*Iterator)(nil)
}
// Iterator returns a stateful iterator whose values can be fetched by an index.
type Iterator struct {
heap *Heap
index int
}
// Iterator returns a stateful iterator whose values can be fetched by an index.
func (heap *Heap) Iterator() Iterator {
return Iterator{heap: heap, index: -1}
}
// Next moves the iterator to the next element and returns true if there was a next element in the container.
// If Next() returns true, then next element's index and value can be retrieved by Index() and Value().
// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
// Modifies the state of the iterator.
func (iterator *Iterator) Next() bool {
if iterator.index < iterator.heap.Size() {
iterator.index++
}
return iterator.heap.withinRange(iterator.index)
}
// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
func (iterator *Iterator) Prev() bool {
if iterator.index >= 0 {
iterator.index--
}
return iterator.heap.withinRange(iterator.index)
}
// Value returns the current element's value.
// Does not modify the state of the iterator.
func (iterator *Iterator) Value() interface{} {
value, _ := iterator.heap.list.Get(iterator.index)
return value
}
// Index returns the current element's index.
// Does not modify the state of the iterator.
func (iterator *Iterator) Index() int {
return iterator.index
}
// Begin resets the iterator to its initial state (one-before-first)
// Call Next() to fetch the first element if any.
func (iterator *Iterator) Begin() {
iterator.index = -1
}
// End moves the iterator past the last element (one-past-the-end).
// Call Prev() to fetch the last element if any.
func (iterator *Iterator) End() {
iterator.index = iterator.heap.Size()
}
// First moves the iterator to the first element and returns true if there was a first element in the container.
// If First() returns true, then first element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
func (iterator *Iterator) First() bool {
iterator.Begin()
return iterator.Next()
}
// Last moves the iterator to the last element and returns true if there was a last element in the container.
// If Last() returns true, then last element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
func (iterator *Iterator) Last() bool {
iterator.End()
return iterator.Prev()
}


@ -0,0 +1,22 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package binaryheap
import "github.com/emirpasic/gods/containers"
func assertSerializationImplementation() {
var _ containers.JSONSerializer = (*Heap)(nil)
var _ containers.JSONDeserializer = (*Heap)(nil)
}
// ToJSON outputs the JSON representation of the heap.
func (heap *Heap) ToJSON() ([]byte, error) {
return heap.list.ToJSON()
}
// FromJSON populates the heap from the input JSON representation.
func (heap *Heap) FromJSON(data []byte) error {
return heap.list.FromJSON(data)
}
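Likewise, a brief sketch of the vendored binaryheap package (illustration only, not used directly by Gitea code in this commit).

package main

import (
	"fmt"

	"github.com/emirpasic/gods/trees/binaryheap"
)

func main() {
	// IntComparator makes this a min-heap: the smallest element sits at the root.
	heap := binaryheap.NewWithIntComparator()
	heap.Push(5, 1, 4, 2, 3) // pushing several values builds the heap bottom-up

	for !heap.Empty() {
		value, _ := heap.Pop()
		fmt.Println(value) // prints 1, 2, 3, 4, 5
	}
}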

vendor/github.com/emirpasic/gods/trees/trees.go (generated, vendored, new file, +21 lines)

@ -0,0 +1,21 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package trees provides an abstract Tree interface.
//
// In computer science, a tree is a widely used abstract data type (ADT) or data structure implementing this ADT that simulates a hierarchical tree structure, with a root value and subtrees of children with a parent node, represented as a set of linked nodes.
//
// Reference: https://en.wikipedia.org/wiki/Tree_%28data_structure%29
package trees
import "github.com/emirpasic/gods/containers"
// Tree interface that all trees implement
type Tree interface {
containers.Container
// Empty() bool
// Size() int
// Clear()
// Values() []interface{}
}

vendor/github.com/emirpasic/gods/utils/comparator.go (generated, vendored, new file, +251 lines)

@ -0,0 +1,251 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package utils
import "time"
// Comparator will make type assertion (see IntComparator for example),
// which will panic if a or b are not of the asserted type.
//
// Should return a number:
// negative , if a < b
// zero , if a == b
// positive , if a > b
type Comparator func(a, b interface{}) int
// StringComparator provides a fast comparison on strings
func StringComparator(a, b interface{}) int {
s1 := a.(string)
s2 := b.(string)
min := len(s2)
if len(s1) < len(s2) {
min = len(s1)
}
diff := 0
for i := 0; i < min && diff == 0; i++ {
diff = int(s1[i]) - int(s2[i])
}
if diff == 0 {
diff = len(s1) - len(s2)
}
if diff < 0 {
return -1
}
if diff > 0 {
return 1
}
return 0
}
// IntComparator provides a basic comparison on int
func IntComparator(a, b interface{}) int {
aAsserted := a.(int)
bAsserted := b.(int)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// Int8Comparator provides a basic comparison on int8
func Int8Comparator(a, b interface{}) int {
aAsserted := a.(int8)
bAsserted := b.(int8)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// Int16Comparator provides a basic comparison on int16
func Int16Comparator(a, b interface{}) int {
aAsserted := a.(int16)
bAsserted := b.(int16)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// Int32Comparator provides a basic comparison on int32
func Int32Comparator(a, b interface{}) int {
aAsserted := a.(int32)
bAsserted := b.(int32)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// Int64Comparator provides a basic comparison on int64
func Int64Comparator(a, b interface{}) int {
aAsserted := a.(int64)
bAsserted := b.(int64)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// UIntComparator provides a basic comparison on uint
func UIntComparator(a, b interface{}) int {
aAsserted := a.(uint)
bAsserted := b.(uint)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// UInt8Comparator provides a basic comparison on uint8
func UInt8Comparator(a, b interface{}) int {
aAsserted := a.(uint8)
bAsserted := b.(uint8)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// UInt16Comparator provides a basic comparison on uint16
func UInt16Comparator(a, b interface{}) int {
aAsserted := a.(uint16)
bAsserted := b.(uint16)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// UInt32Comparator provides a basic comparison on uint32
func UInt32Comparator(a, b interface{}) int {
aAsserted := a.(uint32)
bAsserted := b.(uint32)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// UInt64Comparator provides a basic comparison on uint64
func UInt64Comparator(a, b interface{}) int {
aAsserted := a.(uint64)
bAsserted := b.(uint64)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// Float32Comparator provides a basic comparison on float32
func Float32Comparator(a, b interface{}) int {
aAsserted := a.(float32)
bAsserted := b.(float32)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// Float64Comparator provides a basic comparison on float64
func Float64Comparator(a, b interface{}) int {
aAsserted := a.(float64)
bAsserted := b.(float64)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// ByteComparator provides a basic comparison on byte
func ByteComparator(a, b interface{}) int {
aAsserted := a.(byte)
bAsserted := b.(byte)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// RuneComparator provides a basic comparison on rune
func RuneComparator(a, b interface{}) int {
aAsserted := a.(rune)
bAsserted := b.(rune)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// TimeComparator provides a basic comparison on time.Time
func TimeComparator(a, b interface{}) int {
aAsserted := a.(time.Time)
bAsserted := b.(time.Time)
switch {
case aAsserted.After(bAsserted):
return 1
case aAsserted.Before(bAsserted):
return -1
default:
return 0
}
}
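A sketch of a user-defined comparator that follows the contract above (negative, zero, or positive result, with a type assertion that panics on unexpected types); the ordering rule chosen here is purely illustrative:
package main

import (
    "fmt"
    "strings"

    "github.com/emirpasic/gods/utils"
)

// byLengthComparator orders strings by length first, then lexicographically.
// Like the built-in comparators it type-asserts its arguments and panics if
// either one is not a string.
func byLengthComparator(a, b interface{}) int {
    s1 := a.(string)
    s2 := b.(string)
    if d := len(s1) - len(s2); d != 0 {
        if d < 0 {
            return -1
        }
        return 1
    }
    return strings.Compare(s1, s2)
}

func main() {
    var _ utils.Comparator = byLengthComparator    // satisfies the Comparator type
    fmt.Println(byLengthComparator("go", "gitea")) // -1: "go" is shorter
    fmt.Println(byLengthComparator("tag", "ref"))  // 1: same length, "tag" sorts after "ref"
}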

29
vendor/github.com/emirpasic/gods/utils/sort.go generated vendored Normal file
View File

@@ -0,0 +1,29 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package utils
import "sort"
// Sort sorts values (in-place) with respect to the given comparator.
//
// Uses Go's sort (hybrid of quicksort for large and then insertion sort for smaller slices).
func Sort(values []interface{}, comparator Comparator) {
sort.Sort(sortable{values, comparator})
}
type sortable struct {
values []interface{}
comparator Comparator
}
func (s sortable) Len() int {
return len(s.values)
}
func (s sortable) Swap(i, j int) {
s.values[i], s.values[j] = s.values[j], s.values[i]
}
func (s sortable) Less(i, j int) bool {
return s.comparator(s.values[i], s.values[j]) < 0
}
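A short usage sketch for Sort with one of the predefined comparators; the slice contents are arbitrary:
package main

import (
    "fmt"

    "github.com/emirpasic/gods/utils"
)

func main() {
    // Sort works in-place on []interface{}; the comparator decides the order.
    values := []interface{}{5, 1, 4, 2, 3}
    utils.Sort(values, utils.IntComparator)
    fmt.Println(values) // [1 2 3 4 5]
}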

47
vendor/github.com/emirpasic/gods/utils/utils.go generated vendored Normal file
View File

@@ -0,0 +1,47 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package utils provides common utility functions.
//
// Provided functionalities:
// - sorting
// - comparators
package utils
import (
"fmt"
"strconv"
)
// ToString converts a value to string.
func ToString(value interface{}) string {
switch value.(type) {
case string:
return value.(string)
case int8:
return strconv.FormatInt(int64(value.(int8)), 10)
case int16:
return strconv.FormatInt(int64(value.(int16)), 10)
case int32:
return strconv.FormatInt(int64(value.(int32)), 10)
case int64:
return strconv.FormatInt(int64(value.(int64)), 10)
case uint8:
return strconv.FormatUint(uint64(value.(uint8)), 10)
case uint16:
return strconv.FormatUint(uint64(value.(uint16)), 10)
case uint32:
return strconv.FormatUint(uint64(value.(uint32)), 10)
case uint64:
return strconv.FormatUint(uint64(value.(uint64)), 10)
case float32:
return strconv.FormatFloat(float64(value.(float32)), 'g', -1, 64)
case float64:
return strconv.FormatFloat(float64(value.(float64)), 'g', -1, 64)
case bool:
return strconv.FormatBool(value.(bool))
default:
return fmt.Sprintf("%+v", value)
}
}
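A small sketch of ToString; only the concrete types listed in the switch get strconv-based conversions, and anything else (including plain int in the code above) falls through to the fmt.Sprintf default:
package main

import (
    "fmt"

    "github.com/emirpasic/gods/utils"
)

func main() {
    fmt.Println(utils.ToString(int64(42)))   // "42" via strconv.FormatInt
    fmt.Println(utils.ToString(3.14))        // float64 case: "3.14"
    fmt.Println(utils.ToString(true))        // "true"
    fmt.Println(utils.ToString([]int{1, 2})) // "[1 2]" via the %+v fallback
}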

21
vendor/github.com/jbenet/go-context/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Juan Batiz-Benet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

120
vendor/github.com/jbenet/go-context/io/ctxio.go generated vendored Normal file
View File

@@ -0,0 +1,120 @@
// Package ctxio provides io.Reader and io.Writer wrappers that
// respect context.Contexts. Use these at the interface between
// your context code and your io.
//
// WARNING: read the code. see how writes and reads will continue
// until you cancel the io. Maybe this package should provide
// versions of io.ReadCloser and io.WriteCloser that automatically
// call .Close when the context expires. But for now -- since in my
// use cases I have long-lived connections with ephemeral io wrappers
// -- this has yet to be a need.
package ctxio
import (
"io"
context "golang.org/x/net/context"
)
type ioret struct {
n int
err error
}
type Writer interface {
io.Writer
}
type ctxWriter struct {
w io.Writer
ctx context.Context
}
// NewWriter wraps a writer to make it respect given Context.
// If there is a blocking write, the returned Writer will return
// whenever the context is cancelled (the return values are n=0
// and err=ctx.Err().)
//
// Note well: this wrapper DOES NOT ACTUALLY cancel the underlying
// write-- there is no way to do that with the standard go io
// interface. So the read and write _will_ happen or hang. So, use
// this sparingly, make sure to cancel the read or write as necessary
// (e.g. closing a connection whose context is up, etc.)
//
// Furthermore, in order to protect your memory from being read
// _after_ you've cancelled the context, this io.Writer will
// first make a **copy** of the buffer.
func NewWriter(ctx context.Context, w io.Writer) *ctxWriter {
if ctx == nil {
ctx = context.Background()
}
return &ctxWriter{ctx: ctx, w: w}
}
func (w *ctxWriter) Write(buf []byte) (int, error) {
buf2 := make([]byte, len(buf))
copy(buf2, buf)
c := make(chan ioret, 1)
go func() {
n, err := w.w.Write(buf2)
c <- ioret{n, err}
close(c)
}()
select {
case r := <-c:
return r.n, r.err
case <-w.ctx.Done():
return 0, w.ctx.Err()
}
}
type Reader interface {
io.Reader
}
type ctxReader struct {
r io.Reader
ctx context.Context
}
// NewReader wraps a reader to make it respect given Context.
// If there is a blocking read, the returned Reader will return
// whenever the context is cancelled (the return values are n=0
// and err=ctx.Err().)
//
// Note well: this wrapper DOES NOT ACTUALLY cancel the underlying
// write-- there is no way to do that with the standard go io
// interface. So the read and write _will_ happen or hang. So, use
// this sparingly, make sure to cancel the read or write as necessary
// (e.g. closing a connection whose context is up, etc.)
//
// Furthermore, in order to protect your memory from being read
// _before_ you've cancelled the context, this io.Reader will
// allocate a buffer of the same size, and **copy** into the client's
// if the read succeeds in time.
func NewReader(ctx context.Context, r io.Reader) *ctxReader {
return &ctxReader{ctx: ctx, r: r}
}
func (r *ctxReader) Read(buf []byte) (int, error) {
buf2 := make([]byte, len(buf))
c := make(chan ioret, 1)
go func() {
n, err := r.r.Read(buf2)
c <- ioret{n, err}
close(c)
}()
select {
case ret := <-c:
copy(buf, buf2)
return ret.n, ret.err
case <-r.ctx.Done():
return 0, r.ctx.Err()
}
}
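A minimal sketch of wrapping ordinary readers and writers with ctxio, assuming a toolchain where golang.org/x/net/context is interchangeable with the standard context package; the sample data and timeout are arbitrary:
package main

import (
    "bytes"
    "fmt"
    "strings"
    "time"

    ctxio "github.com/jbenet/go-context/io"
    "golang.org/x/net/context"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()

    // Reads and writes return early with ctx.Err() once the context is done;
    // as the package warns, the underlying I/O itself is not aborted.
    r := ctxio.NewReader(ctx, strings.NewReader("refs/heads/master\n"))
    var out bytes.Buffer
    w := ctxio.NewWriter(ctx, &out)

    buf := make([]byte, 32)
    n, err := r.Read(buf)
    fmt.Println(n, err)

    _, err = w.Write(buf[:n])
    fmt.Println(out.String(), err)
}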

4
vendor/github.com/kevinburke/ssh_config/AUTHORS.txt generated vendored Normal file
View File

@@ -0,0 +1,4 @@
Eugene Terentev <eugene@terentev.net>
Kevin Burke <kev@inburke.com>
Sergey Lukjanov <me@slukjanov.name>
Wayne Ashley Berry <wayneashleyberry@gmail.com>

49
vendor/github.com/kevinburke/ssh_config/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,49 @@
Copyright (c) 2017 Kevin Burke.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
===================
The lexer and parser borrow heavily from github.com/pelletier/go-toml. The
license for that project is copied below.
The MIT License (MIT)
Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

639
vendor/github.com/kevinburke/ssh_config/config.go generated vendored Normal file
View File

@@ -0,0 +1,639 @@
// Package ssh_config provides tools for manipulating SSH config files.
//
// Importantly, this parser attempts to preserve comments in a given file, so
// you can manipulate a `ssh_config` file from a program, if your heart desires.
//
// The Get() and GetStrict() functions will attempt to read values from
// $HOME/.ssh/config, falling back to /etc/ssh/ssh_config. The first argument is
// the host name to match on ("example.com"), and the second argument is the key
// you want to retrieve ("Port"). The keywords are case insensitive.
//
// port := ssh_config.Get("myhost", "Port")
//
// You can also manipulate an SSH config file and then print it or write it back
// to disk.
//
// f, _ := os.Open(filepath.Join(os.Getenv("HOME"), ".ssh", "config"))
// cfg, _ := ssh_config.Decode(f)
// for _, host := range cfg.Hosts {
// fmt.Println("patterns:", host.Patterns)
// for _, node := range host.Nodes {
// fmt.Println(node.String())
// }
// }
//
// // Write the cfg back to disk:
// fmt.Println(cfg.String())
//
// BUG: the Match directive is currently unsupported; parsing a config with
// a Match directive will trigger an error.
package ssh_config
import (
"bytes"
"errors"
"fmt"
"io"
"os"
osuser "os/user"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
)
const version = "0.5"
type configFinder func() string
// UserSettings checks ~/.ssh and /etc/ssh for configuration files. The config
// files are parsed and cached the first time Get() or GetStrict() is called.
type UserSettings struct {
IgnoreErrors bool
systemConfig *Config
systemConfigFinder configFinder
userConfig *Config
userConfigFinder configFinder
loadConfigs sync.Once
onceErr error
}
func homedir() string {
user, err := osuser.Current()
if err == nil {
return user.HomeDir
} else {
return os.Getenv("HOME")
}
}
func userConfigFinder() string {
return filepath.Join(homedir(), ".ssh", "config")
}
// DefaultUserSettings is the default UserSettings and is used by Get and
// GetStrict. It checks both $HOME/.ssh/config and /etc/ssh/ssh_config for keys,
// and it will return parse errors (if any) instead of swallowing them.
var DefaultUserSettings = &UserSettings{
IgnoreErrors: false,
systemConfigFinder: systemConfigFinder,
userConfigFinder: userConfigFinder,
}
func systemConfigFinder() string {
return filepath.Join("/", "etc", "ssh", "ssh_config")
}
func findVal(c *Config, alias, key string) (string, error) {
if c == nil {
return "", nil
}
val, err := c.Get(alias, key)
if err != nil || val == "" {
return "", err
}
if err := validate(key, val); err != nil {
return "", err
}
return val, nil
}
// Get finds the first value for key within a declaration that matches the
// alias. Get returns the empty string if no value was found, or if IgnoreErrors
// is false and we could not parse the configuration file. Use GetStrict to
// disambiguate the latter cases.
//
// The match for key is case insensitive.
//
// Get is a wrapper around DefaultUserSettings.Get.
func Get(alias, key string) string {
return DefaultUserSettings.Get(alias, key)
}
// GetStrict finds the first value for key within a declaration that matches the
// alias. If key has a default value and no matching configuration is found, the
// default will be returned. For more information on default values and the way
// patterns are matched, see the manpage for ssh_config.
//
// error will be non-nil if and only if a user's configuration file or the
// system configuration file could not be parsed, and u.IgnoreErrors is false.
//
// GetStrict is a wrapper around DefaultUserSettings.GetStrict.
func GetStrict(alias, key string) (string, error) {
return DefaultUserSettings.GetStrict(alias, key)
}
// Get finds the first value for key within a declaration that matches the
// alias. Get returns the empty string if no value was found, or if IgnoreErrors
// is false and we could not parse the configuration file. Use GetStrict to
// disambiguate the latter cases.
//
// The match for key is case insensitive.
func (u *UserSettings) Get(alias, key string) string {
val, err := u.GetStrict(alias, key)
if err != nil {
return ""
}
return val
}
// GetStrict finds the first value for key within a declaration that matches the
// alias. If key has a default value and no matching configuration is found, the
// default will be returned. For more information on default values and the way
// patterns are matched, see the manpage for ssh_config.
//
// error will be non-nil if and only if a user's configuration file or the
// system configuration file could not be parsed, and u.IgnoreErrors is false.
func (u *UserSettings) GetStrict(alias, key string) (string, error) {
u.loadConfigs.Do(func() {
// can't parse user file, that's ok.
var filename string
if u.userConfigFinder == nil {
filename = userConfigFinder()
} else {
filename = u.userConfigFinder()
}
var err error
u.userConfig, err = parseFile(filename)
if err != nil && os.IsNotExist(err) == false {
u.onceErr = err
return
}
if u.systemConfigFinder == nil {
filename = systemConfigFinder()
} else {
filename = u.systemConfigFinder()
}
u.systemConfig, err = parseFile(filename)
if err != nil && os.IsNotExist(err) == false {
u.onceErr = err
return
}
})
if u.onceErr != nil && u.IgnoreErrors == false {
return "", u.onceErr
}
val, err := findVal(u.userConfig, alias, key)
if err != nil || val != "" {
return val, err
}
val2, err2 := findVal(u.systemConfig, alias, key)
if err2 != nil || val2 != "" {
return val2, err2
}
return Default(key), nil
}
func parseFile(filename string) (*Config, error) {
return parseWithDepth(filename, 0)
}
func parseWithDepth(filename string, depth uint8) (*Config, error) {
f, err := os.Open(filename)
if err != nil {
return nil, err
}
defer f.Close()
return decode(f, isSystem(filename), depth)
}
func isSystem(filename string) bool {
// TODO i'm not sure this is the best way to detect a system repo
return strings.HasPrefix(filepath.Clean(filename), "/etc/ssh")
}
// Decode reads r into a Config, or returns an error if r could not be parsed as
// an SSH config file.
func Decode(r io.Reader) (*Config, error) {
return decode(r, false, 0)
}
func decode(r io.Reader, system bool, depth uint8) (c *Config, err error) {
defer func() {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
panic(r)
}
if e, ok := r.(error); ok && e == ErrDepthExceeded {
err = e
return
}
err = errors.New(r.(string))
}
}()
c = parseSSH(lexSSH(r), system, depth)
return c, err
}
// Config represents an SSH config file.
type Config struct {
// A list of hosts to match against. The file begins with an implicit
// "Host *" declaration matching all hosts.
Hosts []*Host
depth uint8
position Position
}
// Get finds the first value in the configuration that matches the alias and
// contains key. Get returns the empty string if no value was found, or if the
// Config contains an invalid conditional Include value.
//
// The match for key is case insensitive.
func (c *Config) Get(alias, key string) (string, error) {
lowerKey := strings.ToLower(key)
for _, host := range c.Hosts {
if !host.Matches(alias) {
continue
}
for _, node := range host.Nodes {
switch t := node.(type) {
case *Empty:
continue
case *KV:
// "keys are case insensitive" per the spec
lkey := strings.ToLower(t.Key)
if lkey == "match" {
panic("can't handle Match directives")
}
if lkey == lowerKey {
return t.Value, nil
}
case *Include:
val := t.Get(alias, key)
if val != "" {
return val, nil
}
default:
return "", fmt.Errorf("unknown Node type %v", t)
}
}
}
return "", nil
}
// String returns a string representation of the Config file.
func (c Config) String() string {
return marshal(c).String()
}
func (c Config) MarshalText() ([]byte, error) {
return marshal(c).Bytes(), nil
}
func marshal(c Config) *bytes.Buffer {
var buf bytes.Buffer
for i := range c.Hosts {
buf.WriteString(c.Hosts[i].String())
}
return &buf
}
// Pattern is a pattern in a Host declaration. Patterns are read-only values;
// create a new one with NewPattern().
type Pattern struct {
str string // Its appearance in the file, not the value that gets compiled.
regex *regexp.Regexp
not bool // True if this is a negated match
}
// String prints the string representation of the pattern.
func (p Pattern) String() string {
return p.str
}
// Copied from regexp.go with * and ? removed.
var specialBytes = []byte(`\.+()|[]{}^$`)
func special(b byte) bool {
return bytes.IndexByte(specialBytes, b) >= 0
}
// NewPattern creates a new Pattern for matching hosts. NewPattern("*") creates
// a Pattern that matches all hosts.
//
// From the manpage, a pattern consists of zero or more non-whitespace
// characters, `*' (a wildcard that matches zero or more characters), or `?' (a
// wildcard that matches exactly one character). For example, to specify a set
// of declarations for any host in the ".co.uk" set of domains, the following
// pattern could be used:
//
// Host *.co.uk
//
// The following pattern would match any host in the 192.168.0.[0-9] network range:
//
// Host 192.168.0.?
func NewPattern(s string) (*Pattern, error) {
if s == "" {
return nil, errors.New("ssh_config: empty pattern")
}
negated := false
if s[0] == '!' {
negated = true
s = s[1:]
}
var buf bytes.Buffer
buf.WriteByte('^')
for i := 0; i < len(s); i++ {
// A byte loop is correct because all metacharacters are ASCII.
switch b := s[i]; b {
case '*':
buf.WriteString(".*")
case '?':
buf.WriteString(".?")
default:
// borrowing from QuoteMeta here.
if special(b) {
buf.WriteByte('\\')
}
buf.WriteByte(b)
}
}
buf.WriteByte('$')
r, err := regexp.Compile(buf.String())
if err != nil {
return nil, err
}
return &Pattern{str: s, regex: r, not: negated}, nil
}
// Host describes a Host directive and the keywords that follow it.
type Host struct {
// A list of host patterns that should match this host.
Patterns []*Pattern
// A Node is either a key/value pair or a comment line.
Nodes []Node
// EOLComment is the comment (if any) terminating the Host line.
EOLComment string
hasEquals bool
leadingSpace uint16 // TODO: handle spaces vs tabs here.
// The file starts with an implicit "Host *" declaration.
implicit bool
}
// Matches returns true if the Host matches for the given alias. For
// a description of the rules that provide a match, see the manpage for
// ssh_config.
func (h *Host) Matches(alias string) bool {
found := false
for i := range h.Patterns {
if h.Patterns[i].regex.MatchString(alias) {
if h.Patterns[i].not == true {
// Negated match. "A pattern entry may be negated by prefixing
// it with an exclamation mark (`!'). If a negated entry is
// matched, then the Host entry is ignored, regardless of
// whether any other patterns on the line match. Negated matches
// are therefore useful to provide exceptions for wildcard
// matches."
return false
}
found = true
}
}
return found
}
// String prints h as it would appear in a config file. Minor tweaks may be
// present in the whitespace in the printed file.
func (h *Host) String() string {
var buf bytes.Buffer
if h.implicit == false {
buf.WriteString(strings.Repeat(" ", int(h.leadingSpace)))
buf.WriteString("Host")
if h.hasEquals {
buf.WriteString(" = ")
} else {
buf.WriteString(" ")
}
for i, pat := range h.Patterns {
buf.WriteString(pat.String())
if i < len(h.Patterns)-1 {
buf.WriteString(" ")
}
}
if h.EOLComment != "" {
buf.WriteString(" #")
buf.WriteString(h.EOLComment)
}
buf.WriteByte('\n')
}
for i := range h.Nodes {
buf.WriteString(h.Nodes[i].String())
buf.WriteByte('\n')
}
return buf.String()
}
// Node represents a line in a Config.
type Node interface {
Pos() Position
String() string
}
// KV is a line in the config file that contains a key, a value, and possibly
// a comment.
type KV struct {
Key string
Value string
Comment string
hasEquals bool
leadingSpace uint16 // Space before the key. TODO handle spaces vs tabs.
position Position
}
// Pos returns k's Position.
func (k *KV) Pos() Position {
return k.position
}
// String prints k as it was parsed in the config file. There may be slight
// changes to the whitespace between values.
func (k *KV) String() string {
if k == nil {
return ""
}
equals := " "
if k.hasEquals {
equals = " = "
}
line := fmt.Sprintf("%s%s%s%s", strings.Repeat(" ", int(k.leadingSpace)), k.Key, equals, k.Value)
if k.Comment != "" {
line += " #" + k.Comment
}
return line
}
// Empty is a line in the config file that contains only whitespace or comments.
type Empty struct {
Comment string
leadingSpace uint16 // TODO handle spaces vs tabs.
position Position
}
// Pos returns e's Position.
func (e *Empty) Pos() Position {
return e.position
}
// String prints e as it was parsed in the config file.
func (e *Empty) String() string {
if e == nil {
return ""
}
if e.Comment == "" {
return ""
}
return fmt.Sprintf("%s#%s", strings.Repeat(" ", int(e.leadingSpace)), e.Comment)
}
// Include holds the result of an Include directive, including the config files
// that have been parsed as part of that directive. At most 5 levels of Include
// statements will be parsed.
type Include struct {
// Comment is the contents of any comment at the end of the Include
// statement.
Comment string
parsed bool
// an include directive can include several different files, and wildcards
directives []string
mu sync.Mutex
// 1:1 mapping between matches and keys in files array; matches preserves
// ordering
matches []string
// actual filenames are listed here
files map[string]*Config
leadingSpace uint16
position Position
depth uint8
hasEquals bool
}
const maxRecurseDepth = 5
// ErrDepthExceeded is returned if too many Include directives are parsed.
// Usually this indicates a recursive loop (an Include directive pointing to the
// file it contains).
var ErrDepthExceeded = errors.New("ssh_config: max recurse depth exceeded")
func removeDups(arr []string) []string {
// Use map to record duplicates as we find them.
encountered := make(map[string]bool, len(arr))
result := make([]string, 0)
for v := range arr {
if encountered[arr[v]] == false {
encountered[arr[v]] = true
result = append(result, arr[v])
}
}
return result
}
// NewInclude creates a new Include with a list of file globs to include.
// Configuration files are parsed greedily (i.e. as soon as this function runs).
// Any error encountered while parsing nested configuration files will be
// returned.
func NewInclude(directives []string, hasEquals bool, pos Position, comment string, system bool, depth uint8) (*Include, error) {
if depth > maxRecurseDepth {
return nil, ErrDepthExceeded
}
inc := &Include{
Comment: comment,
directives: directives,
files: make(map[string]*Config),
position: pos,
leadingSpace: uint16(pos.Col) - 1,
depth: depth,
hasEquals: hasEquals,
}
// no need for inc.mu.Lock() since nothing else can access this inc
matches := make([]string, 0)
for i := range directives {
var path string
if filepath.IsAbs(directives[i]) {
path = directives[i]
} else if system {
path = filepath.Join("/etc/ssh", directives[i])
} else {
path = filepath.Join(homedir(), ".ssh", directives[i])
}
theseMatches, err := filepath.Glob(path)
if err != nil {
return nil, err
}
matches = append(matches, theseMatches...)
}
matches = removeDups(matches)
inc.matches = matches
for i := range matches {
config, err := parseWithDepth(matches[i], depth)
if err != nil {
return nil, err
}
inc.files[matches[i]] = config
}
return inc, nil
}
// Pos returns the position of the Include directive in the larger file.
func (i *Include) Pos() Position {
return i.position
}
// Get finds the first value in the Include statement matching the alias and the
// given key.
func (inc *Include) Get(alias, key string) string {
inc.mu.Lock()
defer inc.mu.Unlock()
// TODO: we search files in any order which is not correct
for i := range inc.matches {
cfg := inc.files[inc.matches[i]]
if cfg == nil {
panic("nil cfg")
}
val, err := cfg.Get(alias, key)
if err == nil && val != "" {
return val
}
}
return ""
}
// String prints out a string representation of this Include directive. Note
// included Config files are not printed as part of this representation.
func (inc *Include) String() string {
equals := " "
if inc.hasEquals {
equals = " = "
}
line := fmt.Sprintf("%sInclude%s%s", strings.Repeat(" ", int(inc.leadingSpace)), equals, strings.Join(inc.directives, " "))
if inc.Comment != "" {
line += " #" + inc.Comment
}
return line
}
var matchAll *Pattern
func init() {
var err error
matchAll, err = NewPattern("*")
if err != nil {
panic(err)
}
}
func newConfig() *Config {
return &Config{
Hosts: []*Host{
&Host{
implicit: true,
Patterns: []*Pattern{matchAll},
Nodes: make([]Node, 0),
},
},
depth: 0,
}
}
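A sketch of exercising the parser through Decode and Config.Get without touching a real ~/.ssh/config; the host name and values are made up:
package main

import (
    "fmt"
    "log"
    "strings"

    "github.com/kevinburke/ssh_config"
)

func main() {
    cfg, err := ssh_config.Decode(strings.NewReader(`
Host gitea.example.com
    Port 2222
    User git
`))
    if err != nil {
        log.Fatal(err)
    }

    // Keyword matching is case-insensitive, so "port" finds the "Port" line.
    port, err := cfg.Get("gitea.example.com", "port")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(port) // "2222"
}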

241
vendor/github.com/kevinburke/ssh_config/lexer.go generated vendored Normal file
View File

@@ -0,0 +1,241 @@
package ssh_config
import (
"io"
buffruneio "github.com/pelletier/go-buffruneio"
)
// Define state functions
type sshLexStateFn func() sshLexStateFn
type sshLexer struct {
input *buffruneio.Reader // Textual source
buffer []rune // Runes composing the current token
tokens chan token
line uint32
col uint16
endbufferLine uint32
endbufferCol uint16
}
func (s *sshLexer) lexComment(previousState sshLexStateFn) sshLexStateFn {
return func() sshLexStateFn {
growingString := ""
for next := s.peek(); next != '\n' && next != eof; next = s.peek() {
if next == '\r' && s.follow("\r\n") {
break
}
growingString += string(next)
s.next()
}
s.emitWithValue(tokenComment, growingString)
s.skip()
return previousState
}
}
// lex the space after an equals sign in a function
func (s *sshLexer) lexRspace() sshLexStateFn {
for {
next := s.peek()
if !isSpace(next) {
break
}
s.skip()
}
return s.lexRvalue
}
func (s *sshLexer) lexEquals() sshLexStateFn {
for {
next := s.peek()
if next == '=' {
s.emit(tokenEquals)
s.skip()
return s.lexRspace
}
// TODO error handling here; newline eof etc.
if !isSpace(next) {
break
}
s.skip()
}
return s.lexRvalue
}
func (s *sshLexer) lexKey() sshLexStateFn {
growingString := ""
for r := s.peek(); isKeyChar(r); r = s.peek() {
// simplified a lot here
if isSpace(r) || r == '=' {
s.emitWithValue(tokenKey, growingString)
s.skip()
return s.lexEquals
}
growingString += string(r)
s.next()
}
s.emitWithValue(tokenKey, growingString)
return s.lexEquals
}
func (s *sshLexer) lexRvalue() sshLexStateFn {
growingString := ""
for {
next := s.peek()
switch next {
case '\r':
if s.follow("\r\n") {
s.emitWithValue(tokenString, growingString)
s.skip()
return s.lexVoid
}
case '\n':
s.emitWithValue(tokenString, growingString)
s.skip()
return s.lexVoid
case '#':
s.emitWithValue(tokenString, growingString)
s.skip()
return s.lexComment(s.lexVoid)
case eof:
s.next()
}
if next == eof {
break
}
growingString += string(next)
s.next()
}
s.emit(tokenEOF)
return nil
}
func (s *sshLexer) read() rune {
r, _, err := s.input.ReadRune()
if err != nil {
panic(err)
}
if r == '\n' {
s.endbufferLine++
s.endbufferCol = 1
} else {
s.endbufferCol++
}
return r
}
func (s *sshLexer) next() rune {
r := s.read()
if r != eof {
s.buffer = append(s.buffer, r)
}
return r
}
func (s *sshLexer) lexVoid() sshLexStateFn {
for {
next := s.peek()
switch next {
case '#':
s.skip()
return s.lexComment(s.lexVoid)
case '\r':
fallthrough
case '\n':
s.emit(tokenEmptyLine)
s.skip()
continue
}
if isSpace(next) {
s.skip()
}
if isKeyStartChar(next) {
return s.lexKey
}
// removed IsKeyStartChar and lexKey. probably will need to re-add
if next == eof {
s.next()
break
}
}
s.emit(tokenEOF)
return nil
}
func (s *sshLexer) ignore() {
s.buffer = make([]rune, 0)
s.line = s.endbufferLine
s.col = s.endbufferCol
}
func (s *sshLexer) skip() {
s.next()
s.ignore()
}
func (s *sshLexer) emit(t tokenType) {
s.emitWithValue(t, string(s.buffer))
}
func (s *sshLexer) emitWithValue(t tokenType, value string) {
tok := token{
Position: Position{s.line, s.col},
typ: t,
val: value,
}
s.tokens <- tok
s.ignore()
}
func (s *sshLexer) peek() rune {
r, _, err := s.input.ReadRune()
if err != nil {
panic(err)
}
s.input.UnreadRune()
return r
}
func (s *sshLexer) follow(next string) bool {
for _, expectedRune := range next {
r, _, err := s.input.ReadRune()
defer s.input.UnreadRune()
if err != nil {
panic(err)
}
if expectedRune != r {
return false
}
}
return true
}
func (s *sshLexer) run() {
for state := s.lexVoid; state != nil; {
state = state()
}
close(s.tokens)
}
func lexSSH(input io.Reader) chan token {
bufferedInput := buffruneio.NewReader(input)
l := &sshLexer{
input: bufferedInput,
tokens: make(chan token),
line: 1,
col: 1,
endbufferLine: 1,
endbufferCol: 1,
}
go l.run()
return l.tokens
}

185
vendor/github.com/kevinburke/ssh_config/parser.go generated vendored Normal file
View File

@@ -0,0 +1,185 @@
package ssh_config
import (
"fmt"
"strings"
)
type sshParser struct {
flow chan token
config *Config
tokensBuffer []token
currentTable []string
seenTableKeys []string
// /etc/ssh parser or local parser - used to find the default for relative
// filepaths in the Include directive
system bool
depth uint8
}
type sshParserStateFn func() sshParserStateFn
// Formats and panics an error message based on a token
func (p *sshParser) raiseErrorf(tok *token, msg string, args ...interface{}) {
// TODO this format is ugly
panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...))
}
func (p *sshParser) raiseError(tok *token, err error) {
if err == ErrDepthExceeded {
panic(err)
}
// TODO this format is ugly
panic(tok.Position.String() + ": " + err.Error())
}
func (p *sshParser) run() {
for state := p.parseStart; state != nil; {
state = state()
}
}
func (p *sshParser) peek() *token {
if len(p.tokensBuffer) != 0 {
return &(p.tokensBuffer[0])
}
tok, ok := <-p.flow
if !ok {
return nil
}
p.tokensBuffer = append(p.tokensBuffer, tok)
return &tok
}
func (p *sshParser) getToken() *token {
if len(p.tokensBuffer) != 0 {
tok := p.tokensBuffer[0]
p.tokensBuffer = p.tokensBuffer[1:]
return &tok
}
tok, ok := <-p.flow
if !ok {
return nil
}
return &tok
}
func (p *sshParser) parseStart() sshParserStateFn {
tok := p.peek()
// end of stream, parsing is finished
if tok == nil {
return nil
}
switch tok.typ {
case tokenComment, tokenEmptyLine:
return p.parseComment
case tokenKey:
return p.parseKV
case tokenEOF:
return nil
default:
p.raiseErrorf(tok, fmt.Sprintf("unexpected token %q\n", tok))
}
return nil
}
func (p *sshParser) parseKV() sshParserStateFn {
key := p.getToken()
hasEquals := false
val := p.getToken()
if val.typ == tokenEquals {
hasEquals = true
val = p.getToken()
}
comment := ""
tok := p.peek()
if tok == nil {
tok = &token{typ: tokenEOF}
}
if tok.typ == tokenComment && tok.Position.Line == val.Position.Line {
tok = p.getToken()
comment = tok.val
}
if strings.ToLower(key.val) == "match" {
// https://github.com/kevinburke/ssh_config/issues/6
p.raiseErrorf(val, "ssh_config: Match directive parsing is unsupported")
return nil
}
if strings.ToLower(key.val) == "host" {
strPatterns := strings.Split(val.val, " ")
patterns := make([]*Pattern, 0)
for i := range strPatterns {
if strPatterns[i] == "" {
continue
}
pat, err := NewPattern(strPatterns[i])
if err != nil {
p.raiseErrorf(val, "Invalid host pattern: %v", err)
return nil
}
patterns = append(patterns, pat)
}
p.config.Hosts = append(p.config.Hosts, &Host{
Patterns: patterns,
Nodes: make([]Node, 0),
EOLComment: comment,
hasEquals: hasEquals,
})
return p.parseStart
}
lastHost := p.config.Hosts[len(p.config.Hosts)-1]
if strings.ToLower(key.val) == "include" {
inc, err := NewInclude(strings.Split(val.val, " "), hasEquals, key.Position, comment, p.system, p.depth+1)
if err == ErrDepthExceeded {
p.raiseError(val, err)
return nil
}
if err != nil {
p.raiseErrorf(val, "Error parsing Include directive: %v", err)
return nil
}
lastHost.Nodes = append(lastHost.Nodes, inc)
return p.parseStart
}
kv := &KV{
Key: key.val,
Value: val.val,
Comment: comment,
hasEquals: hasEquals,
leadingSpace: uint16(key.Position.Col) - 1,
position: key.Position,
}
lastHost.Nodes = append(lastHost.Nodes, kv)
return p.parseStart
}
func (p *sshParser) parseComment() sshParserStateFn {
comment := p.getToken()
lastHost := p.config.Hosts[len(p.config.Hosts)-1]
lastHost.Nodes = append(lastHost.Nodes, &Empty{
Comment: comment.val,
// account for the "#" as well
leadingSpace: comment.Position.Col - 2,
position: comment.Position,
})
return p.parseStart
}
func parseSSH(flow chan token, system bool, depth uint8) *Config {
result := newConfig()
result.position = Position{1, 1}
parser := &sshParser{
flow: flow,
config: result,
tokensBuffer: make([]token, 0),
currentTable: make([]string, 0),
seenTableKeys: make([]string, 0),
system: system,
depth: depth,
}
parser.run()
return result
}

25
vendor/github.com/kevinburke/ssh_config/position.go generated vendored Normal file
View File

@@ -0,0 +1,25 @@
package ssh_config
import "fmt"
// Position of a document element within a SSH document.
//
// Line and Col are both 1-indexed positions for the element's line number and
// column number, respectively. Values of zero or less will cause Invalid()
// to return true.
type Position struct {
Line uint32 // line within the document
Col uint16 // column within the line
}
// String representation of the position.
// Displays 1-indexed line and column numbers.
func (p Position) String() string {
return fmt.Sprintf("(%d, %d)", p.Line, p.Col)
}
// Invalid returns whether the position is invalid (i.e. has a Line or Col
// value of zero or less).
func (p Position) Invalid() bool {
return p.Line <= 0 || p.Col <= 0
}

49
vendor/github.com/kevinburke/ssh_config/token.go generated vendored Normal file
View File

@@ -0,0 +1,49 @@
package ssh_config
import "fmt"
type token struct {
Position
typ tokenType
val string
}
func (t token) String() string {
switch t.typ {
case tokenEOF:
return "EOF"
}
return fmt.Sprintf("%q", t.val)
}
type tokenType int
const (
eof = -(iota + 1)
)
const (
tokenError tokenType = iota
tokenEOF
tokenEmptyLine
tokenComment
tokenKey
tokenEquals
tokenString
)
func isSpace(r rune) bool {
return r == ' ' || r == '\t'
}
func isKeyStartChar(r rune) bool {
return !(isSpace(r) || r == '\r' || r == '\n' || r == eof)
}
// I'm not sure that this is correct
func isKeyChar(r rune) bool {
// Keys start with the first character that isn't whitespace or [ and end
// with the last non-whitespace character before the equals sign. Keys
// cannot contain a # character."
return !(r == '\r' || r == '\n' || r == eof || r == '=')
}

162
vendor/github.com/kevinburke/ssh_config/validators.go generated vendored Normal file
View File

@@ -0,0 +1,162 @@
package ssh_config
import (
"fmt"
"strconv"
"strings"
)
// Default returns the default value for the given keyword, for example "22" if
// the keyword is "Port". Default returns the empty string if the keyword has no
// default, or if the keyword is unknown. Keyword matching is case-insensitive.
//
// Default values are provided by OpenSSH_7.4p1 on a Mac.
func Default(keyword string) string {
return defaults[strings.ToLower(keyword)]
}
// Arguments where the value must be "yes" or "no" and *only* yes or no.
var yesnos = map[string]bool{
strings.ToLower("BatchMode"): true,
strings.ToLower("CanonicalizeFallbackLocal"): true,
strings.ToLower("ChallengeResponseAuthentication"): true,
strings.ToLower("CheckHostIP"): true,
strings.ToLower("ClearAllForwardings"): true,
strings.ToLower("Compression"): true,
strings.ToLower("EnableSSHKeysign"): true,
strings.ToLower("ExitOnForwardFailure"): true,
strings.ToLower("ForwardAgent"): true,
strings.ToLower("ForwardX11"): true,
strings.ToLower("ForwardX11Trusted"): true,
strings.ToLower("GatewayPorts"): true,
strings.ToLower("GSSAPIAuthentication"): true,
strings.ToLower("GSSAPIDelegateCredentials"): true,
strings.ToLower("HostbasedAuthentication"): true,
strings.ToLower("IdentitiesOnly"): true,
strings.ToLower("KbdInteractiveAuthentication"): true,
strings.ToLower("NoHostAuthenticationForLocalhost"): true,
strings.ToLower("PasswordAuthentication"): true,
strings.ToLower("PermitLocalCommand"): true,
strings.ToLower("PubkeyAuthentication"): true,
strings.ToLower("RhostsRSAAuthentication"): true,
strings.ToLower("RSAAuthentication"): true,
strings.ToLower("StreamLocalBindUnlink"): true,
strings.ToLower("TCPKeepAlive"): true,
strings.ToLower("UseKeychain"): true,
strings.ToLower("UsePrivilegedPort"): true,
strings.ToLower("VisualHostKey"): true,
}
var uints = map[string]bool{
strings.ToLower("CanonicalizeMaxDots"): true,
strings.ToLower("CompressionLevel"): true, // 1 to 9
strings.ToLower("ConnectionAttempts"): true,
strings.ToLower("ConnectTimeout"): true,
strings.ToLower("NumberOfPasswordPrompts"): true,
strings.ToLower("Port"): true,
strings.ToLower("ServerAliveCountMax"): true,
strings.ToLower("ServerAliveInterval"): true,
}
func mustBeYesOrNo(lkey string) bool {
return yesnos[lkey]
}
func mustBeUint(lkey string) bool {
return uints[lkey]
}
func validate(key, val string) error {
lkey := strings.ToLower(key)
if mustBeYesOrNo(lkey) && (val != "yes" && val != "no") {
return fmt.Errorf("ssh_config: value for key %q must be 'yes' or 'no', got %q", key, val)
}
if mustBeUint(lkey) {
_, err := strconv.ParseUint(val, 10, 64)
if err != nil {
return fmt.Errorf("ssh_config: %v", err)
}
}
return nil
}
var defaults = map[string]string{
strings.ToLower("AddKeysToAgent"): "no",
strings.ToLower("AddressFamily"): "any",
strings.ToLower("BatchMode"): "no",
strings.ToLower("CanonicalizeFallbackLocal"): "yes",
strings.ToLower("CanonicalizeHostname"): "no",
strings.ToLower("CanonicalizeMaxDots"): "1",
strings.ToLower("ChallengeResponseAuthentication"): "yes",
strings.ToLower("CheckHostIP"): "yes",
// TODO is this still the correct cipher
strings.ToLower("Cipher"): "3des",
strings.ToLower("Ciphers"): "chacha20-poly1305@openssh.com,aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,aes128-cbc,aes192-cbc,aes256-cbc",
strings.ToLower("ClearAllForwardings"): "no",
strings.ToLower("Compression"): "no",
strings.ToLower("CompressionLevel"): "6",
strings.ToLower("ConnectionAttempts"): "1",
strings.ToLower("ControlMaster"): "no",
strings.ToLower("EnableSSHKeysign"): "no",
strings.ToLower("EscapeChar"): "~",
strings.ToLower("ExitOnForwardFailure"): "no",
strings.ToLower("FingerprintHash"): "sha256",
strings.ToLower("ForwardAgent"): "no",
strings.ToLower("ForwardX11"): "no",
strings.ToLower("ForwardX11Timeout"): "20m",
strings.ToLower("ForwardX11Trusted"): "no",
strings.ToLower("GatewayPorts"): "no",
strings.ToLower("GlobalKnownHostsFile"): "/etc/ssh/ssh_known_hosts /etc/ssh/ssh_known_hosts2",
strings.ToLower("GSSAPIAuthentication"): "no",
strings.ToLower("GSSAPIDelegateCredentials"): "no",
strings.ToLower("HashKnownHosts"): "no",
strings.ToLower("HostbasedAuthentication"): "no",
strings.ToLower("HostbasedKeyTypes"): "ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,ssh-rsa",
strings.ToLower("HostKeyAlgorithms"): "ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,ssh-rsa",
// HostName has a dynamic default (the value passed at the command line).
strings.ToLower("IdentitiesOnly"): "no",
strings.ToLower("IdentityFile"): "~/.ssh/identity",
// IPQoS has a dynamic default based on interactive or non-interactive
// sessions.
strings.ToLower("KbdInteractiveAuthentication"): "yes",
strings.ToLower("KexAlgorithms"): "curve25519-sha256,curve25519-sha256@libssh.org,ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group-exchange-sha1,diffie-hellman-group14-sha1",
strings.ToLower("LogLevel"): "INFO",
strings.ToLower("MACs"): "umac-64-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,hmac-sha1-etm@openssh.com,umac-64@openssh.com,umac-128@openssh.com,hmac-sha2-256,hmac-sha2-512,hmac-sha1",
strings.ToLower("NoHostAuthenticationForLocalhost"): "no",
strings.ToLower("NumberOfPasswordPrompts"): "3",
strings.ToLower("PasswordAuthentication"): "yes",
strings.ToLower("PermitLocalCommand"): "no",
strings.ToLower("Port"): "22",
strings.ToLower("PreferredAuthentications"): "gssapi-with-mic,hostbased,publickey,keyboard-interactive,password",
strings.ToLower("Protocol"): "2",
strings.ToLower("ProxyUseFdpass"): "no",
strings.ToLower("PubkeyAcceptedKeyTypes"): "ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,ssh-rsa",
strings.ToLower("PubkeyAuthentication"): "yes",
strings.ToLower("RekeyLimit"): "default none",
strings.ToLower("RhostsRSAAuthentication"): "no",
strings.ToLower("RSAAuthentication"): "yes",
strings.ToLower("ServerAliveCountMax"): "3",
strings.ToLower("ServerAliveInterval"): "0",
strings.ToLower("StreamLocalBindMask"): "0177",
strings.ToLower("StreamLocalBindUnlink"): "no",
strings.ToLower("StrictHostKeyChecking"): "ask",
strings.ToLower("TCPKeepAlive"): "yes",
strings.ToLower("Tunnel"): "no",
strings.ToLower("TunnelDevice"): "any:any",
strings.ToLower("UpdateHostKeys"): "no",
strings.ToLower("UseKeychain"): "no",
strings.ToLower("UsePrivilegedPort"): "no",
strings.ToLower("UserKnownHostsFile"): "~/.ssh/known_hosts ~/.ssh/known_hosts2",
strings.ToLower("VerifyHostKeyDNS"): "no",
strings.ToLower("VisualHostKey"): "no",
strings.ToLower("XAuthLocation"): "/usr/X11R6/bin/xauth",
}
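A tiny sketch of the Default lookup described above; the keyword spellings vary only to show that matching is case-insensitive:
package main

import (
    "fmt"

    "github.com/kevinburke/ssh_config"
)

func main() {
    fmt.Println(ssh_config.Default("Port"))         // "22"
    fmt.Println(ssh_config.Default("loglevel"))     // "INFO" (case-insensitive lookup)
    fmt.Println(ssh_config.Default("NoSuchOption")) // "" (unknown keyword, no default)
}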

21
vendor/github.com/mitchellh/go-homedir/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2013 Mitchell Hashimoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

157
vendor/github.com/mitchellh/go-homedir/homedir.go generated vendored Normal file
View File

@@ -0,0 +1,157 @@
package homedir
import (
"bytes"
"errors"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
)
// DisableCache will disable caching of the home directory. Caching is enabled
// by default.
var DisableCache bool
var homedirCache string
var cacheLock sync.RWMutex
// Dir returns the home directory for the executing user.
//
// This uses an OS-specific method for discovering the home directory.
// An error is returned if a home directory cannot be detected.
func Dir() (string, error) {
if !DisableCache {
cacheLock.RLock()
cached := homedirCache
cacheLock.RUnlock()
if cached != "" {
return cached, nil
}
}
cacheLock.Lock()
defer cacheLock.Unlock()
var result string
var err error
if runtime.GOOS == "windows" {
result, err = dirWindows()
} else {
// Unix-like system, so just assume Unix
result, err = dirUnix()
}
if err != nil {
return "", err
}
homedirCache = result
return result, nil
}
// Expand expands the path to include the home directory if the path
// is prefixed with `~`. If it isn't prefixed with `~`, the path is
// returned as-is.
func Expand(path string) (string, error) {
if len(path) == 0 {
return path, nil
}
if path[0] != '~' {
return path, nil
}
if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
return "", errors.New("cannot expand user-specific home dir")
}
dir, err := Dir()
if err != nil {
return "", err
}
return filepath.Join(dir, path[1:]), nil
}
func dirUnix() (string, error) {
homeEnv := "HOME"
if runtime.GOOS == "plan9" {
// On plan9, env vars are lowercase.
homeEnv = "home"
}
// First prefer the HOME environmental variable
if home := os.Getenv(homeEnv); home != "" {
return home, nil
}
var stdout bytes.Buffer
// If that fails, try OS specific commands
if runtime.GOOS == "darwin" {
cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`)
cmd.Stdout = &stdout
if err := cmd.Run(); err == nil {
result := strings.TrimSpace(stdout.String())
if result != "" {
return result, nil
}
}
} else {
cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
// If the error is ErrNotFound, we ignore it. Otherwise, return it.
if err != exec.ErrNotFound {
return "", err
}
} else {
if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
// username:password:uid:gid:gecos:home:shell
passwdParts := strings.SplitN(passwd, ":", 7)
if len(passwdParts) > 5 {
return passwdParts[5], nil
}
}
}
}
// If all else fails, try the shell
stdout.Reset()
cmd := exec.Command("sh", "-c", "cd && pwd")
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
return "", err
}
result := strings.TrimSpace(stdout.String())
if result == "" {
return "", errors.New("blank output when reading home directory")
}
return result, nil
}
func dirWindows() (string, error) {
// First prefer the HOME environmental variable
if home := os.Getenv("HOME"); home != "" {
return home, nil
}
// Prefer standard environment variable USERPROFILE
if home := os.Getenv("USERPROFILE"); home != "" {
return home, nil
}
drive := os.Getenv("HOMEDRIVE")
path := os.Getenv("HOMEPATH")
home := drive + path
if drive == "" || path == "" {
return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank")
}
return home, nil
}
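A short usage sketch for Dir and Expand; the path passed to Expand is just an example:
package main

import (
    "fmt"
    "log"

    homedir "github.com/mitchellh/go-homedir"
)

func main() {
    // Dir caches its result unless homedir.DisableCache is set to true.
    home, err := homedir.Dir()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(home)

    // Expand only rewrites paths that start with "~"; others are returned as-is.
    p, err := homedir.Expand("~/.ssh/config")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(p)
}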

117
vendor/github.com/pelletier/go-buffruneio/buffruneio.go generated vendored Normal file
View File

@@ -0,0 +1,117 @@
// Package buffruneio is a wrapper around bufio to provide buffered runes access with unlimited unreads.
package buffruneio
import (
"bufio"
"container/list"
"errors"
"io"
)
// Rune to indicate end of file.
const (
EOF = -(iota + 1)
)
// ErrNoRuneToUnread is returned by UnreadRune() when the read index is already at the beginning of the buffer.
var ErrNoRuneToUnread = errors.New("no rune to unwind")
// Reader implements runes buffering for an io.Reader object.
type Reader struct {
buffer *list.List
current *list.Element
input *bufio.Reader
}
// NewReader returns a new Reader.
func NewReader(rd io.Reader) *Reader {
return &Reader{
buffer: list.New(),
input: bufio.NewReader(rd),
}
}
type runeWithSize struct {
r rune
size int
}
func (rd *Reader) feedBuffer() error {
r, size, err := rd.input.ReadRune()
if err != nil {
if err != io.EOF {
return err
}
r = EOF
}
newRuneWithSize := runeWithSize{r, size}
rd.buffer.PushBack(newRuneWithSize)
if rd.current == nil {
rd.current = rd.buffer.Back()
}
return nil
}
// ReadRune reads the next rune from buffer, or from the underlying reader if needed.
func (rd *Reader) ReadRune() (rune, int, error) {
if rd.current == rd.buffer.Back() || rd.current == nil {
err := rd.feedBuffer()
if err != nil {
return EOF, 0, err
}
}
runeWithSize := rd.current.Value.(runeWithSize)
rd.current = rd.current.Next()
return runeWithSize.r, runeWithSize.size, nil
}
// UnreadRune pushes back the previously read rune in the buffer, extending it if needed.
func (rd *Reader) UnreadRune() error {
if rd.current == rd.buffer.Front() {
return ErrNoRuneToUnread
}
if rd.current == nil {
rd.current = rd.buffer.Back()
} else {
rd.current = rd.current.Prev()
}
return nil
}
// Forget removes runes stored before the current stream position index.
func (rd *Reader) Forget() {
if rd.current == nil {
rd.current = rd.buffer.Back()
}
for ; rd.current != rd.buffer.Front(); rd.buffer.Remove(rd.current.Prev()) {
}
}
// PeekRunes returns at most the next n runes, reading from the underlying source
// if needed. It does not move the current index, and includes EOF if reached.
func (rd *Reader) PeekRunes(n int) []rune {
res := make([]rune, 0, n)
cursor := rd.current
for i := 0; i < n; i++ {
if cursor == nil {
err := rd.feedBuffer()
if err != nil {
return res
}
cursor = rd.buffer.Back()
}
if cursor != nil {
r := cursor.Value.(runeWithSize).r
res = append(res, r)
if r == EOF {
return res
}
cursor = cursor.Next()
}
}
return res
}
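A minimal sketch of the buffered rune reader with its unlimited unread and peek behaviour; the input string is arbitrary:
package main

import (
    "fmt"
    "strings"

    buffruneio "github.com/pelletier/go-buffruneio"
)

func main() {
    rd := buffruneio.NewReader(strings.NewReader("Héllo"))

    r, size, _ := rd.ReadRune()
    fmt.Println(string(r), size) // "H 1"

    // Unlimited unread: the rune goes back into the internal buffer.
    if err := rd.UnreadRune(); err != nil {
        fmt.Println(err)
    }

    // PeekRunes looks ahead without moving the read index.
    fmt.Println(string(rd.PeekRunes(3))) // "Hél"

    r, _, _ = rd.ReadRune()
    fmt.Println(string(r)) // still "H": peeking did not advance the reader
}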

28
vendor/github.com/src-d/gcfg/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,28 @@
Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go
Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

145
vendor/github.com/src-d/gcfg/doc.go generated vendored Normal file
View File

@@ -0,0 +1,145 @@
// Package gcfg reads "INI-style" text-based configuration files with
// "name=value" pairs grouped into sections (gcfg files).
//
// This package is still a work in progress; see the sections below for planned
// changes.
//
// Syntax
//
// The syntax is based on that used by git config:
// http://git-scm.com/docs/git-config#_syntax .
// There are some (planned) differences compared to the git config format:
// - improve data portability:
// - must be encoded in UTF-8 (for now) and must not contain the 0 byte
// - include and "path" type is not supported
// (path type may be implementable as a user-defined type)
// - internationalization
// - section and variable names can contain unicode letters, unicode digits
// (as defined in http://golang.org/ref/spec#Characters ) and hyphens
// (U+002D), starting with a unicode letter
// - disallow potentially ambiguous or misleading definitions:
// - `[sec.sub]` format is not allowed (deprecated in gitconfig)
// - `[sec ""]` is not allowed
// - use `[sec]` for section name "sec" and empty subsection name
// - (planned) within a single file, definitions must be contiguous for each:
// - section: '[secA]' -> '[secB]' -> '[secA]' is an error
// - subsection: '[sec "A"]' -> '[sec "B"]' -> '[sec "A"]' is an error
// - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error
//
// Data structure
//
// The functions in this package read values into a user-defined struct.
// Each section corresponds to a struct field in the config struct, and each
// variable in a section corresponds to a data field in the section struct.
// The mapping of each section or variable name to fields is done either based
// on the "gcfg" struct tag or by matching the name of the section or variable,
// ignoring case. In the latter case, hyphens '-' in section and variable names
// correspond to underscores '_' in field names.
// Fields must be exported; to use a section or variable name starting with a
// letter that is neither upper- or lower-case, prefix the field name with 'X'.
// (See https://code.google.com/p/go/issues/detail?id=5763#c4 .)
//
// For sections with subsections, the corresponding field in config must be a
// map, rather than a struct, with string keys and pointer-to-struct values.
// Values for subsection variables are stored in the map with the subsection
// name used as the map key.
// (Note that unlike section and variable names, subsection names are case
// sensitive.)
// When using a map, and there is a section with the same section name but
// without a subsection name, its values are stored with the empty string used
// as the key.
// It is possible to provide default values for subsections in the section
// "default-<sectionname>" (or by setting values in the corresponding struct
// field "Default_<sectionname>").
//
// The functions in this package panic if config is not a pointer to a struct,
// or when a field is not of a suitable type (either a struct or a map with
// string keys and pointer-to-struct values).
//
// Parsing of values
//
// The section structs in the config struct may contain single-valued or
// multi-valued variables. Variables of unnamed slice type (that is, a type
// starting with `[]`) are treated as multi-value; all others (including named
// slice types) are treated as single-valued variables.
//
// Single-valued variables are handled based on the type as follows.
// Unnamed pointer types (that is, types starting with `*`) are dereferenced,
// and if necessary, a new instance is allocated.
//
// For types implementing the encoding.TextUnmarshaler interface, the
// UnmarshalText method is used to set the value. Implementing this method is
// the recommended way for parsing user-defined types.
//
// For fields of string kind, the value string is assigned to the field, after
// unquoting and unescaping as needed.
// For fields of bool kind, the field is set to true if the value is "true",
// "yes", "on" or "1", and set to false if the value is "false", "no", "off" or
// "0", ignoring case. In addition, single-valued bool fields can be specified
// with a "blank" value (variable name without an equals sign and value); in
// such a case the value is set to true.
//
// Predefined integer types [u]int(|8|16|32|64) and big.Int are parsed as
// decimal or hexadecimal (if having '0x' prefix). (This is to prevent
// unintuitively handling zero-padded numbers as octal.) Other types having
// [u]int* as the underlying type, such as os.FileMode and uintptr, allow
// decimal, hexadecimal, or octal values.
// Parsing mode for integer types can be overridden using the struct tag option
// ",int=mode" where mode is a combination of the 'd', 'h', and 'o' characters
// (each standing for decimal, hexadecimal, and octal, respectively.)
//
// All other types are parsed using fmt.Sscanf with the "%v" verb.
//
// For multi-valued variables, each individual value is parsed as above and
// appended to the slice. If the first value is specified as a "blank" value
// (variable name without equals sign and value), a new slice is allocated;
// that is, any values previously set in the slice will be ignored.
//
// The types subpackage provides helpers for parsing "enum-like" and integer
// types.
//
// Error handling
//
// There are 3 types of errors:
//
// - programmer errors / panics:
// - invalid configuration structure
// - data errors:
// - fatal errors:
// - invalid configuration syntax
// - warnings:
// - data that doesn't belong to any part of the config structure
//
// Programmer errors trigger panics. These should be fixed by the programmer
// before releasing code that uses gcfg.
//
// Data errors cause gcfg to return a non-nil error value. This includes the
// case when there are extra unknown key-value definitions in the configuration
// data (extra data).
// However, on some occasions it is desirable to be able to proceed in
// situations when the only data error is that of extra data.
// These errors are handled at a different (warning) priority and can be
// filtered out programmatically. To ignore extra data warnings, wrap the
// gcfg.Read*Into invocation into a call to gcfg.FatalOnly.
//
// TODO
//
// The following is a list of changes under consideration:
// - documentation
// - self-contained syntax documentation
// - more practical examples
// - move TODOs to issue tracker (eventually)
// - syntax
// - reconsider valid escape sequences
// (gitconfig doesn't support \r in value, \t in subsection name, etc.)
// - reading / parsing gcfg files
// - define internal representation structure
// - support multiple inputs (readers, strings, files)
// - support declaring encoding (?)
// - support varying fields sets for subsections (?)
// - writing gcfg files
// - error handling
// - make error context accessible programmatically?
// - limit input size?
//
package gcfg // import "github.com/src-d/gcfg"
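
A minimal, self-contained sketch of the mapping described above; the Config type and the input text are illustrative only and assume the package is importable at the path shown in the package clause:

package main

import (
    "fmt"
    "log"

    "github.com/src-d/gcfg"
)

type Config struct {
    // section [main] is matched to this field by case-insensitive name
    Main struct {
        Name  string
        Debug bool     // "debug" with no '=' is a blank value and sets true
        Multi []string // unnamed slice type => multi-valued variable
    }
    // a section with subsections maps to a map[string]*struct field
    Remote map[string]*struct {
        URL string
    }
}

func main() {
    src := `
[main]
name = example
debug
multi = a
multi = b

[remote "origin"]
url = https://example.com/repo.git
`
    var cfg Config
    if err := gcfg.ReadStringInto(&cfg, src); err != nil {
        log.Fatal(err)
    }
    fmt.Println(cfg.Main.Name, cfg.Main.Debug, cfg.Main.Multi) // example true [a b]
    fmt.Println(cfg.Remote["origin"].URL)                      // https://example.com/repo.git
}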

41
vendor/github.com/src-d/gcfg/errors.go generated vendored Normal file
View File

@ -0,0 +1,41 @@
package gcfg
import (
"gopkg.in/warnings.v0"
)
// FatalOnly filters the results of a Read*Into invocation and returns only
// fatal errors. That is, errors (warnings) indicating data for unknown
// sections / variables are ignored. Example invocation:
//
// err := gcfg.FatalOnly(gcfg.ReadFileInto(&cfg, configFile))
// if err != nil {
// ...
//
func FatalOnly(err error) error {
return warnings.FatalOnly(err)
}
func isFatal(err error) bool {
_, ok := err.(extraData)
return !ok
}
type extraData struct {
section string
subsection *string
variable *string
}
func (e extraData) Error() string {
s := "can't store data at section \"" + e.section + "\""
if e.subsection != nil {
s += ", subsection \"" + *e.subsection + "\""
}
if e.variable != nil {
s += ", variable \"" + *e.variable + "\""
}
return s
}
var _ error = extraData{}
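
As a usage note, wrapping any Read*Into call in FatalOnly (as in the doc comment above) drops the extra-data warnings, so unknown keys are tolerated; cfg and the file name in this fragment are placeholders:

// Tolerate unknown sections/variables; fail only on fatal errors.
if err := gcfg.FatalOnly(gcfg.ReadFileInto(&cfg, "app.gcfg")); err != nil {
    log.Fatal(err)
}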

7
vendor/github.com/src-d/gcfg/go1_0.go generated vendored Normal file
View File

@ -0,0 +1,7 @@
// +build !go1.2
package gcfg
type textUnmarshaler interface {
UnmarshalText(text []byte) error
}

9
vendor/github.com/src-d/gcfg/go1_2.go generated vendored Normal file
View File

@ -0,0 +1,9 @@
// +build go1.2
package gcfg
import (
"encoding"
)
type textUnmarshaler encoding.TextUnmarshaler

273
vendor/github.com/src-d/gcfg/read.go generated vendored Normal file
View File

@ -0,0 +1,273 @@
package gcfg
import (
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"github.com/src-d/gcfg/scanner"
"github.com/src-d/gcfg/token"
"gopkg.in/warnings.v0"
)
var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t', 'b': '\b'}
// no error: invalid literals should be caught by scanner
func unquote(s string) string {
u, q, esc := make([]rune, 0, len(s)), false, false
for _, c := range s {
if esc {
uc, ok := unescape[c]
switch {
case ok:
u = append(u, uc)
fallthrough
case !q && c == '\n':
esc = false
continue
}
panic("invalid escape sequence")
}
switch c {
case '"':
q = !q
case '\\':
esc = true
default:
u = append(u, c)
}
}
if q {
panic("missing end quote")
}
if esc {
panic("invalid escape sequence")
}
return string(u)
}
func read(c *warnings.Collector, callback func(string, string, string, string, bool) error,
fset *token.FileSet, file *token.File, src []byte) error {
//
var s scanner.Scanner
var errs scanner.ErrorList
s.Init(file, src, func(p token.Position, m string) { errs.Add(p, m) }, 0)
sect, sectsub := "", ""
pos, tok, lit := s.Scan()
errfn := func(msg string) error {
return fmt.Errorf("%s: %s", fset.Position(pos), msg)
}
for {
if errs.Len() > 0 {
if err := c.Collect(errs.Err()); err != nil {
return err
}
}
switch tok {
case token.EOF:
return nil
case token.EOL, token.COMMENT:
pos, tok, lit = s.Scan()
case token.LBRACK:
pos, tok, lit = s.Scan()
if errs.Len() > 0 {
if err := c.Collect(errs.Err()); err != nil {
return err
}
}
if tok != token.IDENT {
if err := c.Collect(errfn("expected section name")); err != nil {
return err
}
}
sect, sectsub = lit, ""
pos, tok, lit = s.Scan()
if errs.Len() > 0 {
if err := c.Collect(errs.Err()); err != nil {
return err
}
}
if tok == token.STRING {
sectsub = unquote(lit)
if sectsub == "" {
if err := c.Collect(errfn("empty subsection name")); err != nil {
return err
}
}
pos, tok, lit = s.Scan()
if errs.Len() > 0 {
if err := c.Collect(errs.Err()); err != nil {
return err
}
}
}
if tok != token.RBRACK {
if sectsub == "" {
if err := c.Collect(errfn("expected subsection name or right bracket")); err != nil {
return err
}
}
if err := c.Collect(errfn("expected right bracket")); err != nil {
return err
}
}
pos, tok, lit = s.Scan()
if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil {
return err
}
}
// If a section/subsection header was found, ensure a
// container object is created, even if there are no
// variables further down.
err := c.Collect(callback(sect, sectsub, "", "", true))
if err != nil {
return err
}
case token.IDENT:
if sect == "" {
if err := c.Collect(errfn("expected section header")); err != nil {
return err
}
}
n := lit
pos, tok, lit = s.Scan()
if errs.Len() > 0 {
return errs.Err()
}
blank, v := tok == token.EOF || tok == token.EOL || tok == token.COMMENT, ""
if !blank {
if tok != token.ASSIGN {
if err := c.Collect(errfn("expected '='")); err != nil {
return err
}
}
pos, tok, lit = s.Scan()
if errs.Len() > 0 {
if err := c.Collect(errs.Err()); err != nil {
return err
}
}
if tok != token.STRING {
if err := c.Collect(errfn("expected value")); err != nil {
return err
}
}
v = unquote(lit)
pos, tok, lit = s.Scan()
if errs.Len() > 0 {
if err := c.Collect(errs.Err()); err != nil {
return err
}
}
if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil {
return err
}
}
}
err := c.Collect(callback(sect, sectsub, n, v, blank))
if err != nil {
return err
}
default:
if sect == "" {
if err := c.Collect(errfn("expected section header")); err != nil {
return err
}
}
if err := c.Collect(errfn("expected section header or variable declaration")); err != nil {
return err
}
}
}
panic("never reached")
}
func readInto(config interface{}, fset *token.FileSet, file *token.File,
src []byte) error {
//
c := warnings.NewCollector(isFatal)
firstPassCallback := func(s string, ss string, k string, v string, bv bool) error {
return set(c, config, s, ss, k, v, bv, false)
}
err := read(c, firstPassCallback, fset, file, src)
if err != nil {
return err
}
secondPassCallback := func(s string, ss string, k string, v string, bv bool) error {
return set(c, config, s, ss, k, v, bv, true)
}
err = read(c, secondPassCallback, fset, file, src)
if err != nil {
return err
}
return c.Done()
}
// ReadWithCallback reads gcfg formatted data from reader and calls
// callback with each section and option found.
//
// Callback is called with section, subsection, option key, option value
// and blank value flag as arguments.
//
// When a section is found, callback is called with an empty subsection, option
// key and option value.
//
// When a subsection is found, callback is called with an empty option key and
// option value.
//
// If the blank value flag is true, the value was not set for the option
// (as opposed to being set to the empty string).
//
// If callback returns an error, ReadWithCallback terminates with an error too.
func ReadWithCallback(reader io.Reader, callback func(string, string, string, string, bool) error) error {
src, err := ioutil.ReadAll(reader)
if err != nil {
return err
}
fset := token.NewFileSet()
file := fset.AddFile("", fset.Base(), len(src))
c := warnings.NewCollector(isFatal)
return read(c, callback, fset, file, src)
}
// ReadInto reads gcfg formatted data from reader and sets the values into the
// corresponding fields in config.
func ReadInto(config interface{}, reader io.Reader) error {
src, err := ioutil.ReadAll(reader)
if err != nil {
return err
}
fset := token.NewFileSet()
file := fset.AddFile("", fset.Base(), len(src))
return readInto(config, fset, file, src)
}
// ReadStringInto reads gcfg formatted data from str and sets the values into
// the corresponding fields in config.
func ReadStringInto(config interface{}, str string) error {
r := strings.NewReader(str)
return ReadInto(config, r)
}
// ReadFileInto reads gcfg formatted data from the file filename and sets the
// values into the corresponding fields in config.
func ReadFileInto(config interface{}, filename string) error {
f, err := os.Open(filename)
if err != nil {
return err
}
defer f.Close()
src, err := ioutil.ReadAll(f)
if err != nil {
return err
}
fset := token.NewFileSet()
file := fset.AddFile(filename, fset.Base(), len(src))
return readInto(config, fset, file, src)
}
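
A small sketch of the callback-based API defined above; the input is illustrative, and note that a section header arrives with an empty key and the blank flag set:

package main

import (
    "fmt"
    "log"
    "strings"

    "github.com/src-d/gcfg"
)

func main() {
    src := "[core]\neditor = vim\nbare\n"
    cb := func(section, subsection, key, value string, blank bool) error {
        switch {
        case key == "":
            fmt.Printf("section %q, subsection %q\n", section, subsection)
        case blank:
            fmt.Printf("  %s (blank value)\n", key)
        default:
            fmt.Printf("  %s = %q\n", key, value)
        }
        return nil
    }
    if err := gcfg.ReadWithCallback(strings.NewReader(src), cb); err != nil {
        log.Fatal(err)
    }
}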

121
vendor/github.com/src-d/gcfg/scanner/errors.go generated vendored Normal file
View File

@ -0,0 +1,121 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package scanner
import (
"fmt"
"io"
"sort"
)
import (
"github.com/src-d/gcfg/token"
)
// In an ErrorList, an error is represented by an *Error.
// The position Pos, if valid, points to the beginning of
// the offending token, and the error condition is described
// by Msg.
//
type Error struct {
Pos token.Position
Msg string
}
// Error implements the error interface.
func (e Error) Error() string {
if e.Pos.Filename != "" || e.Pos.IsValid() {
// don't print "<unknown position>"
// TODO(gri) reconsider the semantics of Position.IsValid
return e.Pos.String() + ": " + e.Msg
}
return e.Msg
}
// ErrorList is a list of *Errors.
// The zero value for an ErrorList is an empty ErrorList ready to use.
//
type ErrorList []*Error
// Add adds an Error with given position and error message to an ErrorList.
func (p *ErrorList) Add(pos token.Position, msg string) {
*p = append(*p, &Error{pos, msg})
}
// Reset resets an ErrorList to no errors.
func (p *ErrorList) Reset() { *p = (*p)[0:0] }
// ErrorList implements the sort Interface.
func (p ErrorList) Len() int { return len(p) }
func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p ErrorList) Less(i, j int) bool {
e := &p[i].Pos
f := &p[j].Pos
if e.Filename < f.Filename {
return true
}
if e.Filename == f.Filename {
return e.Offset < f.Offset
}
return false
}
// Sort sorts an ErrorList. *Error entries are sorted by position,
// other errors are sorted by error message, and before any *Error
// entry.
//
func (p ErrorList) Sort() {
sort.Sort(p)
}
// RemoveMultiples sorts an ErrorList and removes all but the first error per line.
func (p *ErrorList) RemoveMultiples() {
sort.Sort(p)
var last token.Position // initial last.Line is != any legal error line
i := 0
for _, e := range *p {
if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line {
last = e.Pos
(*p)[i] = e
i++
}
}
(*p) = (*p)[0:i]
}
// An ErrorList implements the error interface.
func (p ErrorList) Error() string {
switch len(p) {
case 0:
return "no errors"
case 1:
return p[0].Error()
}
return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1)
}
// Err returns an error equivalent to this error list.
// If the list is empty, Err returns nil.
func (p ErrorList) Err() error {
if len(p) == 0 {
return nil
}
return p
}
// PrintError is a utility function that prints a list of errors to w,
// one error per line, if the err parameter is an ErrorList. Otherwise
// it prints the err string.
//
func PrintError(w io.Writer, err error) {
if list, ok := err.(ErrorList); ok {
for _, e := range list {
fmt.Fprintf(w, "%s\n", e)
}
} else if err != nil {
fmt.Fprintf(w, "%s\n", err)
}
}
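
For reference, a sketch of accumulating an ErrorList by hand and reporting it with PrintError; the positions and messages are made up:

package main

import (
    "os"

    "github.com/src-d/gcfg/scanner"
    "github.com/src-d/gcfg/token"
)

func main() {
    var errs scanner.ErrorList
    errs.Add(token.Position{Filename: "app.gcfg", Offset: 24, Line: 3, Column: 1}, "expected '='")
    errs.Add(token.Position{Filename: "app.gcfg", Offset: 0, Line: 1, Column: 1}, "expected section header")

    errs.Sort() // order by file name and offset

    // one error per line, e.g. "app.gcfg:1:1: expected section header"
    scanner.PrintError(os.Stderr, errs.Err())
}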

342
vendor/github.com/src-d/gcfg/scanner/scanner.go generated vendored Normal file
View File

@ -0,0 +1,342 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package scanner implements a scanner for gcfg configuration text.
// It takes a []byte as source which can then be tokenized
// through repeated calls to the Scan method.
//
// Note that the API for the scanner package may change to accommodate new
// features or implementation changes in gcfg.
//
package scanner
import (
"fmt"
"path/filepath"
"unicode"
"unicode/utf8"
)
import (
"github.com/src-d/gcfg/token"
)
// An ErrorHandler may be provided to Scanner.Init. If a syntax error is
// encountered and a handler was installed, the handler is called with a
// position and an error message. The position points to the beginning of
// the offending token.
//
type ErrorHandler func(pos token.Position, msg string)
// A Scanner holds the scanner's internal state while processing
// a given text. It can be allocated as part of another data
// structure but must be initialized via Init before use.
//
type Scanner struct {
// immutable state
file *token.File // source file handle
dir string // directory portion of file.Name()
src []byte // source
err ErrorHandler // error reporting; or nil
mode Mode // scanning mode
// scanning state
ch rune // current character
offset int // character offset
rdOffset int // reading offset (position after current character)
lineOffset int // current line offset
nextVal bool // next token is expected to be a value
// public state - ok to modify
ErrorCount int // number of errors encountered
}
// Read the next Unicode char into s.ch.
// s.ch < 0 means end-of-file.
//
func (s *Scanner) next() {
if s.rdOffset < len(s.src) {
s.offset = s.rdOffset
if s.ch == '\n' {
s.lineOffset = s.offset
s.file.AddLine(s.offset)
}
r, w := rune(s.src[s.rdOffset]), 1
switch {
case r == 0:
s.error(s.offset, "illegal character NUL")
case r >= 0x80:
// not ASCII
r, w = utf8.DecodeRune(s.src[s.rdOffset:])
if r == utf8.RuneError && w == 1 {
s.error(s.offset, "illegal UTF-8 encoding")
}
}
s.rdOffset += w
s.ch = r
} else {
s.offset = len(s.src)
if s.ch == '\n' {
s.lineOffset = s.offset
s.file.AddLine(s.offset)
}
s.ch = -1 // eof
}
}
// A mode value is a set of flags (or 0).
// They control scanner behavior.
//
type Mode uint
const (
ScanComments Mode = 1 << iota // return comments as COMMENT tokens
)
// Init prepares the scanner s to tokenize the text src by setting the
// scanner at the beginning of src. The scanner uses the file set file
// for position information and it adds line information for each line.
// It is ok to re-use the same file when re-scanning the same file, as
// line information which is already present is ignored. Init causes a
// panic if the file size does not match the src size.
//
// Calls to Scan will invoke the error handler err if they encounter a
// syntax error and err is not nil. Also, for each error encountered,
// the Scanner field ErrorCount is incremented by one. The mode parameter
// determines how comments are handled.
//
// Note that Init may call err if there is an error in the first character
// of the file.
//
func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) {
// Explicitly initialize all fields since a scanner may be reused.
if file.Size() != len(src) {
panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src)))
}
s.file = file
s.dir, _ = filepath.Split(file.Name())
s.src = src
s.err = err
s.mode = mode
s.ch = ' '
s.offset = 0
s.rdOffset = 0
s.lineOffset = 0
s.ErrorCount = 0
s.nextVal = false
s.next()
}
func (s *Scanner) error(offs int, msg string) {
if s.err != nil {
s.err(s.file.Position(s.file.Pos(offs)), msg)
}
s.ErrorCount++
}
func (s *Scanner) scanComment() string {
// initial [;#] already consumed
offs := s.offset - 1 // position of initial [;#]
for s.ch != '\n' && s.ch >= 0 {
s.next()
}
return string(s.src[offs:s.offset])
}
func isLetter(ch rune) bool {
return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= 0x80 && unicode.IsLetter(ch)
}
func isDigit(ch rune) bool {
return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
}
func (s *Scanner) scanIdentifier() string {
offs := s.offset
for isLetter(s.ch) || isDigit(s.ch) || s.ch == '-' {
s.next()
}
return string(s.src[offs:s.offset])
}
func (s *Scanner) scanEscape(val bool) {
offs := s.offset
ch := s.ch
s.next() // always make progress
switch ch {
case '\\', '"':
// ok
case 'n', 't', 'b':
if val {
break // ok
}
fallthrough
default:
s.error(offs, "unknown escape sequence")
}
}
func (s *Scanner) scanString() string {
// '"' opening already consumed
offs := s.offset - 1
for s.ch != '"' {
ch := s.ch
s.next()
if ch == '\n' || ch < 0 {
s.error(offs, "string not terminated")
break
}
if ch == '\\' {
s.scanEscape(false)
}
}
s.next()
return string(s.src[offs:s.offset])
}
func stripCR(b []byte) []byte {
c := make([]byte, len(b))
i := 0
for _, ch := range b {
if ch != '\r' {
c[i] = ch
i++
}
}
return c[:i]
}
func (s *Scanner) scanValString() string {
offs := s.offset
hasCR := false
end := offs
inQuote := false
loop:
for inQuote || s.ch >= 0 && s.ch != '\n' && s.ch != ';' && s.ch != '#' {
ch := s.ch
s.next()
switch {
case inQuote && ch == '\\':
s.scanEscape(true)
case !inQuote && ch == '\\':
if s.ch == '\r' {
hasCR = true
s.next()
}
if s.ch != '\n' {
s.scanEscape(true)
} else {
s.next()
}
case ch == '"':
inQuote = !inQuote
case ch == '\r':
hasCR = true
case ch < 0 || inQuote && ch == '\n':
s.error(offs, "string not terminated")
break loop
}
if inQuote || !isWhiteSpace(ch) {
end = s.offset
}
}
lit := s.src[offs:end]
if hasCR {
lit = stripCR(lit)
}
return string(lit)
}
func isWhiteSpace(ch rune) bool {
return ch == ' ' || ch == '\t' || ch == '\r'
}
func (s *Scanner) skipWhitespace() {
for isWhiteSpace(s.ch) {
s.next()
}
}
// Scan scans the next token and returns the token position, the token,
// and its literal string if applicable. The source end is indicated by
// token.EOF.
//
// If the returned token is a literal (token.IDENT, token.STRING) or
// token.COMMENT, the literal string has the corresponding value.
//
// If the returned token is token.ILLEGAL, the literal string is the
// offending character.
//
// In all other cases, Scan returns an empty literal string.
//
// For more tolerant parsing, Scan will return a valid token if
// possible even if a syntax error was encountered. Thus, even
// if the resulting token sequence contains no illegal tokens,
// a client may not assume that no error occurred. Instead it
// must check the scanner's ErrorCount or the number of calls
// of the error handler, if there was one installed.
//
// Scan adds line information to the file added to the file
// set with Init. Token positions are relative to that file
// and thus relative to the file set.
//
func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) {
scanAgain:
s.skipWhitespace()
// current token start
pos = s.file.Pos(s.offset)
// determine token value
switch ch := s.ch; {
case s.nextVal:
lit = s.scanValString()
tok = token.STRING
s.nextVal = false
case isLetter(ch):
lit = s.scanIdentifier()
tok = token.IDENT
default:
s.next() // always make progress
switch ch {
case -1:
tok = token.EOF
case '\n':
tok = token.EOL
case '"':
tok = token.STRING
lit = s.scanString()
case '[':
tok = token.LBRACK
case ']':
tok = token.RBRACK
case ';', '#':
// comment
lit = s.scanComment()
if s.mode&ScanComments == 0 {
// skip comment
goto scanAgain
}
tok = token.COMMENT
case '=':
tok = token.ASSIGN
s.nextVal = true
default:
s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch))
tok = token.ILLEGAL
lit = string(ch)
}
}
return
}
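
A minimal sketch of driving the scanner directly through the Init/Scan contract documented above; the file name and input are illustrative:

package main

import (
    "fmt"

    "github.com/src-d/gcfg/scanner"
    "github.com/src-d/gcfg/token"
)

func main() {
    src := []byte("[core] ; a comment\nname = value\n")
    fset := token.NewFileSet()
    file := fset.AddFile("example.gcfg", fset.Base(), len(src))

    var s scanner.Scanner
    s.Init(file, src, func(pos token.Position, msg string) {
        fmt.Printf("scan error at %s: %s\n", pos, msg)
    }, scanner.ScanComments)

    for {
        pos, tok, lit := s.Scan()
        if tok == token.EOF {
            break
        }
        fmt.Printf("%-18s %-8s %q\n", fset.Position(pos), tok, lit)
    }
}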

332
vendor/github.com/src-d/gcfg/set.go generated vendored Normal file
View File

@ -0,0 +1,332 @@
package gcfg
import (
"bytes"
"encoding/gob"
"fmt"
"math/big"
"reflect"
"strings"
"unicode"
"unicode/utf8"
"github.com/src-d/gcfg/types"
"gopkg.in/warnings.v0"
)
type tag struct {
ident string
intMode string
}
func newTag(ts string) tag {
t := tag{}
s := strings.Split(ts, ",")
t.ident = s[0]
for _, tse := range s[1:] {
if strings.HasPrefix(tse, "int=") {
t.intMode = tse[len("int="):]
}
}
return t
}
func fieldFold(v reflect.Value, name string) (reflect.Value, tag) {
var n string
r0, _ := utf8.DecodeRuneInString(name)
if unicode.IsLetter(r0) && !unicode.IsLower(r0) && !unicode.IsUpper(r0) {
n = "X"
}
n += strings.Replace(name, "-", "_", -1)
f, ok := v.Type().FieldByNameFunc(func(fieldName string) bool {
if !v.FieldByName(fieldName).CanSet() {
return false
}
f, _ := v.Type().FieldByName(fieldName)
t := newTag(f.Tag.Get("gcfg"))
if t.ident != "" {
return strings.EqualFold(t.ident, name)
}
return strings.EqualFold(n, fieldName)
})
if !ok {
return reflect.Value{}, tag{}
}
return v.FieldByName(f.Name), newTag(f.Tag.Get("gcfg"))
}
type setter func(destp interface{}, blank bool, val string, t tag) error
var errUnsupportedType = fmt.Errorf("unsupported type")
var errBlankUnsupported = fmt.Errorf("blank value not supported for type")
var setters = []setter{
typeSetter, textUnmarshalerSetter, kindSetter, scanSetter,
}
func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error {
dtu, ok := d.(textUnmarshaler)
if !ok {
return errUnsupportedType
}
if blank {
return errBlankUnsupported
}
return dtu.UnmarshalText([]byte(val))
}
func boolSetter(d interface{}, blank bool, val string, t tag) error {
if blank {
reflect.ValueOf(d).Elem().Set(reflect.ValueOf(true))
return nil
}
b, err := types.ParseBool(val)
if err == nil {
reflect.ValueOf(d).Elem().Set(reflect.ValueOf(b))
}
return err
}
func intMode(mode string) types.IntMode {
var m types.IntMode
if strings.ContainsAny(mode, "dD") {
m |= types.Dec
}
if strings.ContainsAny(mode, "hH") {
m |= types.Hex
}
if strings.ContainsAny(mode, "oO") {
m |= types.Oct
}
return m
}
var typeModes = map[reflect.Type]types.IntMode{
reflect.TypeOf(int(0)): types.Dec | types.Hex,
reflect.TypeOf(int8(0)): types.Dec | types.Hex,
reflect.TypeOf(int16(0)): types.Dec | types.Hex,
reflect.TypeOf(int32(0)): types.Dec | types.Hex,
reflect.TypeOf(int64(0)): types.Dec | types.Hex,
reflect.TypeOf(uint(0)): types.Dec | types.Hex,
reflect.TypeOf(uint8(0)): types.Dec | types.Hex,
reflect.TypeOf(uint16(0)): types.Dec | types.Hex,
reflect.TypeOf(uint32(0)): types.Dec | types.Hex,
reflect.TypeOf(uint64(0)): types.Dec | types.Hex,
// use default mode (allow dec/hex/oct) for uintptr type
reflect.TypeOf(big.Int{}): types.Dec | types.Hex,
}
func intModeDefault(t reflect.Type) types.IntMode {
m, ok := typeModes[t]
if !ok {
m = types.Dec | types.Hex | types.Oct
}
return m
}
func intSetter(d interface{}, blank bool, val string, t tag) error {
if blank {
return errBlankUnsupported
}
mode := intMode(t.intMode)
if mode == 0 {
mode = intModeDefault(reflect.TypeOf(d).Elem())
}
return types.ParseInt(d, val, mode)
}
func stringSetter(d interface{}, blank bool, val string, t tag) error {
if blank {
return errBlankUnsupported
}
dsp, ok := d.(*string)
if !ok {
return errUnsupportedType
}
*dsp = val
return nil
}
var kindSetters = map[reflect.Kind]setter{
reflect.String: stringSetter,
reflect.Bool: boolSetter,
reflect.Int: intSetter,
reflect.Int8: intSetter,
reflect.Int16: intSetter,
reflect.Int32: intSetter,
reflect.Int64: intSetter,
reflect.Uint: intSetter,
reflect.Uint8: intSetter,
reflect.Uint16: intSetter,
reflect.Uint32: intSetter,
reflect.Uint64: intSetter,
reflect.Uintptr: intSetter,
}
var typeSetters = map[reflect.Type]setter{
reflect.TypeOf(big.Int{}): intSetter,
}
func typeSetter(d interface{}, blank bool, val string, tt tag) error {
t := reflect.ValueOf(d).Type().Elem()
setter, ok := typeSetters[t]
if !ok {
return errUnsupportedType
}
return setter(d, blank, val, tt)
}
func kindSetter(d interface{}, blank bool, val string, tt tag) error {
k := reflect.ValueOf(d).Type().Elem().Kind()
setter, ok := kindSetters[k]
if !ok {
return errUnsupportedType
}
return setter(d, blank, val, tt)
}
func scanSetter(d interface{}, blank bool, val string, tt tag) error {
if blank {
return errBlankUnsupported
}
return types.ScanFully(d, val, 'v')
}
func newValue(c *warnings.Collector, sect string, vCfg reflect.Value,
vType reflect.Type) (reflect.Value, error) {
//
pv := reflect.New(vType)
dfltName := "default-" + sect
dfltField, _ := fieldFold(vCfg, dfltName)
var err error
if dfltField.IsValid() {
b := bytes.NewBuffer(nil)
ge := gob.NewEncoder(b)
if err = c.Collect(ge.EncodeValue(dfltField)); err != nil {
return pv, err
}
gd := gob.NewDecoder(bytes.NewReader(b.Bytes()))
if err = c.Collect(gd.DecodeValue(pv.Elem())); err != nil {
return pv, err
}
}
return pv, nil
}
func set(c *warnings.Collector, cfg interface{}, sect, sub, name string,
value string, blankValue bool, subsectPass bool) error {
//
vPCfg := reflect.ValueOf(cfg)
if vPCfg.Kind() != reflect.Ptr || vPCfg.Elem().Kind() != reflect.Struct {
panic(fmt.Errorf("config must be a pointer to a struct"))
}
vCfg := vPCfg.Elem()
vSect, _ := fieldFold(vCfg, sect)
if !vSect.IsValid() {
err := extraData{section: sect}
return c.Collect(err)
}
isSubsect := vSect.Kind() == reflect.Map
if subsectPass != isSubsect {
return nil
}
if isSubsect {
vst := vSect.Type()
if vst.Key().Kind() != reflect.String ||
vst.Elem().Kind() != reflect.Ptr ||
vst.Elem().Elem().Kind() != reflect.Struct {
panic(fmt.Errorf("map field for section must have string keys and "+
" pointer-to-struct values: section %q", sect))
}
if vSect.IsNil() {
vSect.Set(reflect.MakeMap(vst))
}
k := reflect.ValueOf(sub)
pv := vSect.MapIndex(k)
if !pv.IsValid() {
vType := vSect.Type().Elem().Elem()
var err error
if pv, err = newValue(c, sect, vCfg, vType); err != nil {
return err
}
vSect.SetMapIndex(k, pv)
}
vSect = pv.Elem()
} else if vSect.Kind() != reflect.Struct {
panic(fmt.Errorf("field for section must be a map or a struct: "+
"section %q", sect))
} else if sub != "" {
err := extraData{section: sect, subsection: &sub}
return c.Collect(err)
}
// Empty name is a special value, meaning that only the
// section/subsection object is to be created, with no values set.
if name == "" {
return nil
}
vVar, t := fieldFold(vSect, name)
if !vVar.IsValid() {
var err error
if isSubsect {
err = extraData{section: sect, subsection: &sub, variable: &name}
} else {
err = extraData{section: sect, variable: &name}
}
return c.Collect(err)
}
// vVal is either single-valued var, or newly allocated value within multi-valued var
var vVal reflect.Value
// multi-value if unnamed slice type
isMulti := vVar.Type().Name() == "" && vVar.Kind() == reflect.Slice ||
vVar.Type().Name() == "" && vVar.Kind() == reflect.Ptr && vVar.Type().Elem().Name() == "" && vVar.Type().Elem().Kind() == reflect.Slice
if isMulti && vVar.Kind() == reflect.Ptr {
if vVar.IsNil() {
vVar.Set(reflect.New(vVar.Type().Elem()))
}
vVar = vVar.Elem()
}
if isMulti && blankValue {
vVar.Set(reflect.Zero(vVar.Type()))
return nil
}
if isMulti {
vVal = reflect.New(vVar.Type().Elem()).Elem()
} else {
vVal = vVar
}
isDeref := vVal.Type().Name() == "" && vVal.Type().Kind() == reflect.Ptr
isNew := isDeref && vVal.IsNil()
// vAddr is address of value to set (dereferenced & allocated as needed)
var vAddr reflect.Value
switch {
case isNew:
vAddr = reflect.New(vVal.Type().Elem())
case isDeref && !isNew:
vAddr = vVal
default:
vAddr = vVal.Addr()
}
vAddrI := vAddr.Interface()
err, ok := error(nil), false
for _, s := range setters {
err = s(vAddrI, blankValue, value, t)
if err == nil {
ok = true
break
}
if err != errUnsupportedType {
return err
}
}
if !ok {
// in case all setters returned errUnsupportedType
return err
}
if isNew { // set reference if it was dereferenced and newly allocated
vVal.Set(vAddr)
}
if isMulti { // append if multi-valued
vVar.Set(reflect.Append(vVar, vVal))
}
return nil
}
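
The tag handling in newTag and fieldFold above can be illustrated with a hypothetical section struct; the field and variable names are made up:

type Limits struct {
    // matched to the variable "max-open" via the gcfg tag; the int=dh
    // option accepts decimal and hexadecimal values only
    MaxOpen int `gcfg:"max-open,int=dh"`
    // matched by name folding: variable "retry-count" -> field Retry_Count
    Retry_Count int
}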

435
vendor/github.com/src-d/gcfg/token/position.go generated vendored Normal file
View File

@ -0,0 +1,435 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// TODO(gri) consider making this a separate package outside the go directory.
package token
import (
"fmt"
"sort"
"sync"
)
// -----------------------------------------------------------------------------
// Positions
// Position describes an arbitrary source position
// including the file, line, and column location.
// A Position is valid if the line number is > 0.
//
type Position struct {
Filename string // filename, if any
Offset int // offset, starting at 0
Line int // line number, starting at 1
Column int // column number, starting at 1 (character count)
}
// IsValid returns true if the position is valid.
func (pos *Position) IsValid() bool { return pos.Line > 0 }
// String returns a string in one of several forms:
//
// file:line:column valid position with file name
// line:column valid position without file name
// file invalid position with file name
// - invalid position without file name
//
func (pos Position) String() string {
s := pos.Filename
if pos.IsValid() {
if s != "" {
s += ":"
}
s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
}
if s == "" {
s = "-"
}
return s
}
// Pos is a compact encoding of a source position within a file set.
// It can be converted into a Position for a more convenient, but much
// larger, representation.
//
// The Pos value for a given file is a number in the range [base, base+size],
// where base and size are specified when adding the file to the file set via
// AddFile.
//
// To create the Pos value for a specific source offset, first add
// the respective file to the current file set (via FileSet.AddFile)
// and then call File.Pos(offset) for that file. Given a Pos value p
// for a specific file set fset, the corresponding Position value is
// obtained by calling fset.Position(p).
//
// Pos values can be compared directly with the usual comparison operators:
// If two Pos values p and q are in the same file, comparing p and q is
// equivalent to comparing the respective source file offsets. If p and q
// are in different files, p < q is true if the file implied by p was added
// to the respective file set before the file implied by q.
//
type Pos int
// The zero value for Pos is NoPos; there is no file and line information
// associated with it, and NoPos.IsValid() is false. NoPos is always
// smaller than any other Pos value. The corresponding Position value
// for NoPos is the zero value for Position.
//
const NoPos Pos = 0
// IsValid returns true if the position is valid.
func (p Pos) IsValid() bool {
return p != NoPos
}
// -----------------------------------------------------------------------------
// File
// A File is a handle for a file belonging to a FileSet.
// A File has a name, size, and line offset table.
//
type File struct {
set *FileSet
name string // file name as provided to AddFile
base int // Pos value range for this file is [base...base+size]
size int // file size as provided to AddFile
// lines and infos are protected by set.mutex
lines []int
infos []lineInfo
}
// Name returns the file name of file f as registered with AddFile.
func (f *File) Name() string {
return f.name
}
// Base returns the base offset of file f as registered with AddFile.
func (f *File) Base() int {
return f.base
}
// Size returns the size of file f as registered with AddFile.
func (f *File) Size() int {
return f.size
}
// LineCount returns the number of lines in file f.
func (f *File) LineCount() int {
f.set.mutex.RLock()
n := len(f.lines)
f.set.mutex.RUnlock()
return n
}
// AddLine adds the line offset for a new line.
// The line offset must be larger than the offset for the previous line
// and smaller than the file size; otherwise the line offset is ignored.
//
func (f *File) AddLine(offset int) {
f.set.mutex.Lock()
if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size {
f.lines = append(f.lines, offset)
}
f.set.mutex.Unlock()
}
// SetLines sets the line offsets for a file and returns true if successful.
// The line offsets are the offsets of the first character of each line;
// for instance for the content "ab\nc\n" the line offsets are {0, 3}.
// An empty file has an empty line offset table.
// Each line offset must be larger than the offset for the previous line
// and smaller than the file size; otherwise SetLines fails and returns
// false.
//
func (f *File) SetLines(lines []int) bool {
// verify validity of lines table
size := f.size
for i, offset := range lines {
if i > 0 && offset <= lines[i-1] || size <= offset {
return false
}
}
// set lines table
f.set.mutex.Lock()
f.lines = lines
f.set.mutex.Unlock()
return true
}
// SetLinesForContent sets the line offsets for the given file content.
func (f *File) SetLinesForContent(content []byte) {
var lines []int
line := 0
for offset, b := range content {
if line >= 0 {
lines = append(lines, line)
}
line = -1
if b == '\n' {
line = offset + 1
}
}
// set lines table
f.set.mutex.Lock()
f.lines = lines
f.set.mutex.Unlock()
}
// A lineInfo object describes alternative file and line number
// information (such as provided via a //line comment in a .go
// file) for a given file offset.
type lineInfo struct {
// fields are exported to make them accessible to gob
Offset int
Filename string
Line int
}
// AddLineInfo adds alternative file and line number information for
// a given file offset. The offset must be larger than the offset for
// the previously added alternative line info and smaller than the
// file size; otherwise the information is ignored.
//
// AddLineInfo is typically used to register alternative position
// information for //line filename:line comments in source files.
//
func (f *File) AddLineInfo(offset int, filename string, line int) {
f.set.mutex.Lock()
if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size {
f.infos = append(f.infos, lineInfo{offset, filename, line})
}
f.set.mutex.Unlock()
}
// Pos returns the Pos value for the given file offset;
// the offset must be <= f.Size().
// f.Pos(f.Offset(p)) == p.
//
func (f *File) Pos(offset int) Pos {
if offset > f.size {
panic("illegal file offset")
}
return Pos(f.base + offset)
}
// Offset returns the offset for the given file position p;
// p must be a valid Pos value in that file.
// f.Offset(f.Pos(offset)) == offset.
//
func (f *File) Offset(p Pos) int {
if int(p) < f.base || int(p) > f.base+f.size {
panic("illegal Pos value")
}
return int(p) - f.base
}
// Line returns the line number for the given file position p;
// p must be a Pos value in that file or NoPos.
//
func (f *File) Line(p Pos) int {
// TODO(gri) this can be implemented much more efficiently
return f.Position(p).Line
}
func searchLineInfos(a []lineInfo, x int) int {
return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1
}
// info returns the file name, line, and column number for a file offset.
func (f *File) info(offset int) (filename string, line, column int) {
filename = f.name
if i := searchInts(f.lines, offset); i >= 0 {
line, column = i+1, offset-f.lines[i]+1
}
if len(f.infos) > 0 {
// almost no files have extra line infos
if i := searchLineInfos(f.infos, offset); i >= 0 {
alt := &f.infos[i]
filename = alt.Filename
if i := searchInts(f.lines, alt.Offset); i >= 0 {
line += alt.Line - i - 1
}
}
}
return
}
func (f *File) position(p Pos) (pos Position) {
offset := int(p) - f.base
pos.Offset = offset
pos.Filename, pos.Line, pos.Column = f.info(offset)
return
}
// Position returns the Position value for the given file position p;
// p must be a Pos value in that file or NoPos.
//
func (f *File) Position(p Pos) (pos Position) {
if p != NoPos {
if int(p) < f.base || int(p) > f.base+f.size {
panic("illegal Pos value")
}
pos = f.position(p)
}
return
}
// -----------------------------------------------------------------------------
// FileSet
// A FileSet represents a set of source files.
// Methods of file sets are synchronized; multiple goroutines
// may invoke them concurrently.
//
type FileSet struct {
mutex sync.RWMutex // protects the file set
base int // base offset for the next file
files []*File // list of files in the order added to the set
last *File // cache of last file looked up
}
// NewFileSet creates a new file set.
func NewFileSet() *FileSet {
s := new(FileSet)
s.base = 1 // 0 == NoPos
return s
}
// Base returns the minimum base offset that must be provided to
// AddFile when adding the next file.
//
func (s *FileSet) Base() int {
s.mutex.RLock()
b := s.base
s.mutex.RUnlock()
return b
}
// AddFile adds a new file with a given filename, base offset, and file size
// to the file set s and returns the file. Multiple files may have the same
// name. The base offset must not be smaller than the FileSet's Base(), and
// size must not be negative.
//
// Adding the file will set the file set's Base() value to base + size + 1
// as the minimum base value for the next file. The following relationship
// exists between a Pos value p for a given file offset offs:
//
// int(p) = base + offs
//
// with offs in the range [0, size] and thus p in the range [base, base+size].
// For convenience, File.Pos may be used to create file-specific position
// values from a file offset.
//
func (s *FileSet) AddFile(filename string, base, size int) *File {
s.mutex.Lock()
defer s.mutex.Unlock()
if base < s.base || size < 0 {
panic("illegal base or size")
}
// base >= s.base && size >= 0
f := &File{s, filename, base, size, []int{0}, nil}
base += size + 1 // +1 because EOF also has a position
if base < 0 {
panic("token.Pos offset overflow (> 2G of source code in file set)")
}
// add the file to the file set
s.base = base
s.files = append(s.files, f)
s.last = f
return f
}
// Iterate calls f for the files in the file set in the order they were added
// until f returns false.
//
func (s *FileSet) Iterate(f func(*File) bool) {
for i := 0; ; i++ {
var file *File
s.mutex.RLock()
if i < len(s.files) {
file = s.files[i]
}
s.mutex.RUnlock()
if file == nil || !f(file) {
break
}
}
}
func searchFiles(a []*File, x int) int {
return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1
}
func (s *FileSet) file(p Pos) *File {
// common case: p is in last file
if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size {
return f
}
// p is not in last file - search all files
if i := searchFiles(s.files, int(p)); i >= 0 {
f := s.files[i]
// f.base <= int(p) by definition of searchFiles
if int(p) <= f.base+f.size {
s.last = f
return f
}
}
return nil
}
// File returns the file that contains the position p.
// If no such file is found (for instance for p == NoPos),
// the result is nil.
//
func (s *FileSet) File(p Pos) (f *File) {
if p != NoPos {
s.mutex.RLock()
f = s.file(p)
s.mutex.RUnlock()
}
return
}
// Position converts a Pos in the fileset into a general Position.
func (s *FileSet) Position(p Pos) (pos Position) {
if p != NoPos {
s.mutex.RLock()
if f := s.file(p); f != nil {
pos = f.position(p)
}
s.mutex.RUnlock()
}
return
}
// -----------------------------------------------------------------------------
// Helper functions
func searchInts(a []int, x int) int {
// This function body is a manually inlined version of:
//
// return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
//
// With better compiler optimizations, this may not be needed in the
// future, but at the moment this change improves the go/printer
// benchmark performance by ~30%. This has a direct impact on the
// speed of gofmt and thus seems worthwhile (2011-04-29).
// TODO(gri): Remove this when compilers have caught up.
i, j := 0, len(a)
for i < j {
h := i + (j-i)/2 // avoid overflow when computing h
// i ≤ h < j
if a[h] <= x {
i = h + 1
} else {
j = h
}
}
return i - 1
}
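
A short sketch of the Pos/Position round trip described above; the file size and offsets are arbitrary:

package main

import (
    "fmt"

    "github.com/src-d/gcfg/token"
)

func main() {
    fset := token.NewFileSet()
    file := fset.AddFile("example.gcfg", fset.Base(), 20)
    file.AddLine(7) // the second line starts at byte offset 7

    p := file.Pos(9)              // compact Pos for byte offset 9
    fmt.Println(fset.Position(p)) // example.gcfg:2:3
}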

56
vendor/github.com/src-d/gcfg/token/serialize.go generated vendored Normal file
View File

@ -0,0 +1,56 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package token
type serializedFile struct {
// fields correspond 1:1 to fields with same (lower-case) name in File
Name string
Base int
Size int
Lines []int
Infos []lineInfo
}
type serializedFileSet struct {
Base int
Files []serializedFile
}
// Read calls decode to deserialize a file set into s; s must not be nil.
func (s *FileSet) Read(decode func(interface{}) error) error {
var ss serializedFileSet
if err := decode(&ss); err != nil {
return err
}
s.mutex.Lock()
s.base = ss.Base
files := make([]*File, len(ss.Files))
for i := 0; i < len(ss.Files); i++ {
f := &ss.Files[i]
files[i] = &File{s, f.Name, f.Base, f.Size, f.Lines, f.Infos}
}
s.files = files
s.last = nil
s.mutex.Unlock()
return nil
}
// Write calls encode to serialize the file set s.
func (s *FileSet) Write(encode func(interface{}) error) error {
var ss serializedFileSet
s.mutex.Lock()
ss.Base = s.base
files := make([]serializedFile, len(s.files))
for i, f := range s.files {
files[i] = serializedFile{f.name, f.base, f.size, f.lines, f.infos}
}
ss.Files = files
s.mutex.Unlock()
return encode(ss)
}
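
Read and Write take generic encode/decode callbacks, so a gob round trip is a natural fit; this is a sketch, not code taken from the vendored package:

package main

import (
    "bytes"
    "encoding/gob"
    "log"

    "github.com/src-d/gcfg/token"
)

func main() {
    fset := token.NewFileSet()
    fset.AddFile("example.gcfg", fset.Base(), 20)

    var buf bytes.Buffer
    if err := fset.Write(gob.NewEncoder(&buf).Encode); err != nil { // serialize
        log.Fatal(err)
    }

    restored := token.NewFileSet()
    if err := restored.Read(gob.NewDecoder(&buf).Decode); err != nil { // deserialize
        log.Fatal(err)
    }
}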

83
vendor/github.com/src-d/gcfg/token/token.go generated vendored Normal file
View File

@ -0,0 +1,83 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package token defines constants representing the lexical tokens of the gcfg
// configuration syntax and basic operations on tokens (printing, predicates).
//
// Note that the API for the token package may change to accommodate new
// features or implementation changes in gcfg.
//
package token
import "strconv"
// Token is the set of lexical tokens of the gcfg configuration syntax.
type Token int
// The list of tokens.
const (
// Special tokens
ILLEGAL Token = iota
EOF
COMMENT
literal_beg
// Identifiers and basic type literals
// (these tokens stand for classes of literals)
IDENT // section-name, variable-name
STRING // "subsection-name", variable value
literal_end
operator_beg
// Operators and delimiters
ASSIGN // =
LBRACK // [
RBRACK // ]
EOL // \n
operator_end
)
var tokens = [...]string{
ILLEGAL: "ILLEGAL",
EOF: "EOF",
COMMENT: "COMMENT",
IDENT: "IDENT",
STRING: "STRING",
ASSIGN: "=",
LBRACK: "[",
RBRACK: "]",
EOL: "\n",
}
// String returns the string corresponding to the token tok.
// For operators and delimiters, the string is the actual token character
// sequence (e.g., for the token ASSIGN, the string is "="). For all other
// tokens the string corresponds to the token constant name (e.g. for the
// token IDENT, the string is "IDENT").
//
func (tok Token) String() string {
s := ""
if 0 <= tok && tok < Token(len(tokens)) {
s = tokens[tok]
}
if s == "" {
s = "token(" + strconv.Itoa(int(tok)) + ")"
}
return s
}
// Predicates
// IsLiteral returns true for tokens corresponding to identifiers
// and basic type literals; it returns false otherwise.
//
func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end }
// IsOperator returns true for tokens corresponding to operators and
// delimiters; it returns false otherwise.
//
func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end }
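
A few illustrative calls (expected output in comments):

package main

import (
    "fmt"

    "github.com/src-d/gcfg/token"
)

func main() {
    fmt.Println(token.ASSIGN)              // =
    fmt.Println(token.IDENT)               // IDENT
    fmt.Println(token.IDENT.IsLiteral())   // true
    fmt.Println(token.LBRACK.IsOperator()) // true
}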

23
vendor/github.com/src-d/gcfg/types/bool.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
package types
// BoolValues defines the name and value mappings for ParseBool.
var BoolValues = map[string]interface{}{
"true": true, "yes": true, "on": true, "1": true,
"false": false, "no": false, "off": false, "0": false,
}
var boolParser = func() *EnumParser {
ep := &EnumParser{}
ep.AddVals(BoolValues)
return ep
}()
// ParseBool parses bool values according to the definitions in BoolValues.
// Parsing is case-insensitive.
func ParseBool(s string) (bool, error) {
v, err := boolParser.Parse(s)
if err != nil {
return false, err
}
return v.(bool), nil
}
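
A quick illustration of the case-insensitive matching against BoolValues:

package main

import (
    "fmt"
    "log"

    "github.com/src-d/gcfg/types"
)

func main() {
    v, err := types.ParseBool("YES")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(v) // true
}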

4
vendor/github.com/src-d/gcfg/types/doc.go generated vendored Normal file
View File

@ -0,0 +1,4 @@
// Package types defines helpers for type conversions.
//
// The API for this package is not finalized yet.
package types

44
vendor/github.com/src-d/gcfg/types/enum.go generated vendored Normal file
View File

@ -0,0 +1,44 @@
package types
import (
"fmt"
"reflect"
"strings"
)
// EnumParser parses "enum" values, i.e. it maps a predefined set of strings to
// predefined values.
type EnumParser struct {
Type string // type name; if not set, use type of first value added
CaseMatch bool // if true, matching of strings is case-sensitive
// PrefixMatch bool
vals map[string]interface{}
}
// AddVals adds strings and values to an EnumParser.
func (ep *EnumParser) AddVals(vals map[string]interface{}) {
if ep.vals == nil {
ep.vals = make(map[string]interface{})
}
for k, v := range vals {
if ep.Type == "" {
ep.Type = reflect.TypeOf(v).Name()
}
if !ep.CaseMatch {
k = strings.ToLower(k)
}
ep.vals[k] = v
}
}
// Parse parses the string and returns the value or an error.
func (ep EnumParser) Parse(s string) (interface{}, error) {
if !ep.CaseMatch {
s = strings.ToLower(s)
}
v, ok := ep.vals[s]
if !ok {
return false, fmt.Errorf("failed to parse %s %#q", ep.Type, s)
}
return v, nil
}
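
A sketch of a custom EnumParser; the log-level mapping is illustrative:

package main

import (
    "fmt"
    "log"

    "github.com/src-d/gcfg/types"
)

func main() {
    levels := &types.EnumParser{Type: "log level"}
    levels.AddVals(map[string]interface{}{
        "debug": 0,
        "info":  1,
        "error": 2,
    })

    v, err := levels.Parse("Info") // case-insensitive unless CaseMatch is set
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(v.(int)) // 1
}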

86
vendor/github.com/src-d/gcfg/types/int.go generated vendored Normal file
View File

@ -0,0 +1,86 @@
package types
import (
"fmt"
"strings"
)
// An IntMode is a mode for parsing integer values, representing a set of
// accepted bases.
type IntMode uint8
// IntMode values for ParseInt; can be combined using bitwise OR.
const (
Dec IntMode = 1 << iota
Hex
Oct
)
// String returns a string representation of IntMode; e.g. `IntMode(Dec|Hex)`.
func (m IntMode) String() string {
var modes []string
if m&Dec != 0 {
modes = append(modes, "Dec")
}
if m&Hex != 0 {
modes = append(modes, "Hex")
}
if m&Oct != 0 {
modes = append(modes, "Oct")
}
return "IntMode(" + strings.Join(modes, "|") + ")"
}
var errIntAmbig = fmt.Errorf("ambiguous integer value; must include '0' prefix")
func prefix0(val string) bool {
return strings.HasPrefix(val, "0") || strings.HasPrefix(val, "-0")
}
func prefix0x(val string) bool {
return strings.HasPrefix(val, "0x") || strings.HasPrefix(val, "-0x")
}
// ParseInt parses val using mode into intptr, which must be a pointer to an
// integer kind type. Non-decimal values require a `0` or `0x` prefix in cases
// where the mode permits ambiguity of base; otherwise the prefix can be omitted.
func ParseInt(intptr interface{}, val string, mode IntMode) error {
val = strings.TrimSpace(val)
verb := byte(0)
switch mode {
case Dec:
verb = 'd'
case Dec + Hex:
if prefix0x(val) {
verb = 'v'
} else {
verb = 'd'
}
case Dec + Oct:
if prefix0(val) && !prefix0x(val) {
verb = 'v'
} else {
verb = 'd'
}
case Dec + Hex + Oct:
verb = 'v'
case Hex:
if prefix0x(val) {
verb = 'v'
} else {
verb = 'x'
}
case Oct:
verb = 'o'
case Hex + Oct:
if prefix0(val) {
verb = 'v'
} else {
return errIntAmbig
}
}
if verb == 0 {
panic("unsupported mode")
}
return ScanFully(intptr, val, verb)
}
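
For example, with a combined mode the base is taken from the prefix, while a single-base mode needs no prefix; the values are illustrative:

package main

import (
    "fmt"
    "log"

    "github.com/src-d/gcfg/types"
)

func main() {
    var n int

    // decimal and hexadecimal allowed; the "0x" prefix selects hexadecimal
    if err := types.ParseInt(&n, "0x1f", types.Dec|types.Hex); err != nil {
        log.Fatal(err)
    }
    fmt.Println(n) // 31

    // hexadecimal-only mode: no prefix required
    if err := types.ParseInt(&n, "1f", types.Hex); err != nil {
        log.Fatal(err)
    }
    fmt.Println(n) // 31
}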

23
vendor/github.com/src-d/gcfg/types/scan.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
package types
import (
"fmt"
"io"
"reflect"
)
// ScanFully uses fmt.Sscanf with verb to fully scan val into ptr.
func ScanFully(ptr interface{}, val string, verb byte) error {
t := reflect.ValueOf(ptr).Elem().Type()
// attempt to read extra bytes to make sure the value is consumed
var b []byte
n, err := fmt.Sscanf(val, "%"+string(verb)+"%s", ptr, &b)
switch {
case n < 1 || n == 1 && err != io.EOF:
return fmt.Errorf("failed to parse %q as %v: %v", val, t, err)
case n > 1:
return fmt.Errorf("failed to parse %q as %v: extra characters %q", val, t, string(b))
}
// n == 1 && err == io.EOF
return nil
}

202
vendor/github.com/xanzy/ssh-agent/LICENSE generated vendored Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

146
vendor/github.com/xanzy/ssh-agent/pageant_windows.go generated vendored Normal file
View File

@ -0,0 +1,146 @@
//
// Copyright (c) 2014 David Mzareulyan
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software
// and associated documentation files (the "Software"), to deal in the Software without restriction,
// including without limitation the rights to use, copy, modify, merge, publish, distribute,
// sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial
// portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
// BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
// +build windows
package sshagent
// see https://github.com/Yasushi/putty/blob/master/windows/winpgntc.c#L155
// see https://github.com/paramiko/paramiko/blob/master/paramiko/win_pageant.py
import (
"encoding/binary"
"errors"
"fmt"
"sync"
"syscall"
"unsafe"
)
// MaxMessageLen is the maximum size of a message that can be sent to Pageant
const MaxMessageLen = 8192
var (
ErrPageantNotFound = errors.New("pageant process not found")
ErrSendMessage = errors.New("error sending message")
ErrMessageTooLong = errors.New("message too long")
ErrInvalidMessageFormat = errors.New("invalid message format")
ErrResponseTooLong = errors.New("response too long")
)
const (
agentCopydataID = 0x804e50ba
wmCopydata = 74
)
type copyData struct {
dwData uintptr
cbData uint32
lpData unsafe.Pointer
}
var (
lock sync.Mutex
winFindWindow = winAPI("user32.dll", "FindWindowW")
winGetCurrentThreadID = winAPI("kernel32.dll", "GetCurrentThreadId")
winSendMessage = winAPI("user32.dll", "SendMessageW")
)
func winAPI(dllName, funcName string) func(...uintptr) (uintptr, uintptr, error) {
proc := syscall.MustLoadDLL(dllName).MustFindProc(funcName)
return func(a ...uintptr) (uintptr, uintptr, error) { return proc.Call(a...) }
}
// Available returns true if Pageant is running
func Available() bool { return pageantWindow() != 0 }
// query sends the message msg to Pageant and returns the response or an error.
// 'msg' is a raw agent request with a length prefix.
// The response is a raw agent response with a length prefix.
func query(msg []byte) ([]byte, error) {
if len(msg) > MaxMessageLen {
return nil, ErrMessageTooLong
}
msgLen := binary.BigEndian.Uint32(msg[:4])
if len(msg) != int(msgLen)+4 {
return nil, ErrInvalidMessageFormat
}
lock.Lock()
defer lock.Unlock()
paWin := pageantWindow()
if paWin == 0 {
return nil, ErrPageantNotFound
}
thID, _, _ := winGetCurrentThreadID()
mapName := fmt.Sprintf("PageantRequest%08x", thID)
pMapName, _ := syscall.UTF16PtrFromString(mapName)
mmap, err := syscall.CreateFileMapping(syscall.InvalidHandle, nil, syscall.PAGE_READWRITE, 0, MaxMessageLen+4, pMapName)
if err != nil {
return nil, err
}
defer syscall.CloseHandle(mmap)
ptr, err := syscall.MapViewOfFile(mmap, syscall.FILE_MAP_WRITE, 0, 0, 0)
if err != nil {
return nil, err
}
defer syscall.UnmapViewOfFile(ptr)
mmSlice := (*(*[MaxMessageLen]byte)(unsafe.Pointer(ptr)))[:]
copy(mmSlice, msg)
mapNameBytesZ := append([]byte(mapName), 0)
cds := copyData{
dwData: agentCopydataID,
cbData: uint32(len(mapNameBytesZ)),
lpData: unsafe.Pointer(&(mapNameBytesZ[0])),
}
resp, _, _ := winSendMessage(paWin, wmCopydata, 0, uintptr(unsafe.Pointer(&cds)))
if resp == 0 {
return nil, ErrSendMessage
}
respLen := binary.BigEndian.Uint32(mmSlice[:4])
if respLen > MaxMessageLen-4 {
return nil, ErrResponseTooLong
}
respData := make([]byte, respLen+4)
copy(respData, mmSlice)
return respData, nil
}
func pageantWindow() uintptr {
nameP, _ := syscall.UTF16PtrFromString("Pageant")
h, _, _ := winFindWindow(uintptr(unsafe.Pointer(nameP)), uintptr(unsafe.Pointer(nameP)))
return h
}
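As a minimal sketch of the framing query expects, the snippet below builds an SSH agent request prefixed with its 4-byte big-endian length. It is not part of the vendored file: it assumes a hypothetical extra file compiled into this package on Windows, and the type value 11 (SSH_AGENTC_REQUEST_IDENTITIES) comes from the SSH agent protocol, not from this code.
// +build windows

package sshagent

import "encoding/binary"

// requestIdentities is a hypothetical helper, shown only to illustrate the framing.
func requestIdentities() ([]byte, error) {
    // Payload is a single byte: the assumed SSH_AGENTC_REQUEST_IDENTITIES type (11).
    payload := []byte{11}
    msg := make([]byte, 4+len(payload))
    binary.BigEndian.PutUint32(msg[:4], uint32(len(payload)))
    copy(msg[4:], payload)
    // query checks the prefix against len(msg)-4 and returns the
    // length-prefixed response read back from the shared memory region.
    return query(msg)
}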

49
vendor/github.com/xanzy/ssh-agent/sshagent.go generated vendored Normal file
View File

@ -0,0 +1,49 @@
//
// Copyright 2015, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// +build !windows
package sshagent
import (
"errors"
"fmt"
"net"
"os"
"golang.org/x/crypto/ssh/agent"
)
// New returns a new agent.Agent that uses a unix socket
func New() (agent.Agent, net.Conn, error) {
if !Available() {
return nil, nil, errors.New("SSH agent requested but SSH_AUTH_SOCK not specified")
}
sshAuthSock := os.Getenv("SSH_AUTH_SOCK")
conn, err := net.Dial("unix", sshAuthSock)
if err != nil {
return nil, nil, fmt.Errorf("Error connecting to SSH_AUTH_SOCK: %v", err)
}
return agent.NewClient(conn), conn, nil
}
// Available returns true if an auth socket is defined
func Available() bool {
return os.Getenv("SSH_AUTH_SOCK") != ""
}
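For context, a short usage sketch of this package (not part of the vendored code): it connects to whichever agent is available, Pageant on Windows or the socket named by SSH_AUTH_SOCK elsewhere, and lists the identities the agent holds.
package main

import (
    "fmt"
    "log"

    sshagent "github.com/xanzy/ssh-agent"
)

func main() {
    // New picks the platform-appropriate transport (see the two build-tagged files).
    ag, conn, err := sshagent.New()
    if err != nil {
        log.Fatal(err)
    }
    if conn != nil {
        // On Windows the returned net.Conn is nil, so guard before closing.
        defer conn.Close()
    }
    keys, err := ag.List()
    if err != nil {
        log.Fatal(err)
    }
    for _, k := range keys {
        fmt.Println(k.Format, k.Comment)
    }
}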

80
vendor/github.com/xanzy/ssh-agent/sshagent_windows.go generated vendored Normal file
View File

@ -0,0 +1,80 @@
//
// Copyright (c) 2014 David Mzareulyan
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software
// and associated documentation files (the "Software"), to deal in the Software without restriction,
// including without limitation the rights to use, copy, modify, merge, publish, distribute,
// sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial
// portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
// BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
// +build windows
package sshagent
import (
"errors"
"io"
"net"
"sync"
"golang.org/x/crypto/ssh/agent"
)
// New returns a new agent.Agent and the (custom) connection it uses
// to communicate with a running pageant.exe instance (see README.md)
func New() (agent.Agent, net.Conn, error) {
if !Available() {
return nil, nil, errors.New("SSH agent requested but Pageant not running")
}
return agent.NewClient(&conn{}), nil, nil
}
type conn struct {
sync.Mutex
buf []byte
}
func (c *conn) Close() {
c.Lock()
defer c.Unlock()
c.buf = nil
}
func (c *conn) Write(p []byte) (int, error) {
c.Lock()
defer c.Unlock()
resp, err := query(p)
if err != nil {
return 0, err
}
c.buf = append(c.buf, resp...)
return len(p), nil
}
func (c *conn) Read(p []byte) (int, error) {
c.Lock()
defer c.Unlock()
if len(c.buf) == 0 {
return 0, io.EOF
}
n := copy(p, c.buf)
c.buf = c.buf[n:]
return n, nil
}

526
vendor/golang.org/x/crypto/cast5/cast5.go generated vendored Normal file
View File

@ -0,0 +1,526 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package cast5 implements CAST5, as defined in RFC 2144. CAST5 is a common
// OpenPGP cipher.
package cast5 // import "golang.org/x/crypto/cast5"
import "errors"
const BlockSize = 8
const KeySize = 16
type Cipher struct {
masking [16]uint32
rotate [16]uint8
}
func NewCipher(key []byte) (c *Cipher, err error) {
if len(key) != KeySize {
return nil, errors.New("CAST5: keys must be 16 bytes")
}
c = new(Cipher)
c.keySchedule(key)
return
}
func (c *Cipher) BlockSize() int {
return BlockSize
}
func (c *Cipher) Encrypt(dst, src []byte) {
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
l, r = r, l^f1(r, c.masking[0], c.rotate[0])
l, r = r, l^f2(r, c.masking[1], c.rotate[1])
l, r = r, l^f3(r, c.masking[2], c.rotate[2])
l, r = r, l^f1(r, c.masking[3], c.rotate[3])
l, r = r, l^f2(r, c.masking[4], c.rotate[4])
l, r = r, l^f3(r, c.masking[5], c.rotate[5])
l, r = r, l^f1(r, c.masking[6], c.rotate[6])
l, r = r, l^f2(r, c.masking[7], c.rotate[7])
l, r = r, l^f3(r, c.masking[8], c.rotate[8])
l, r = r, l^f1(r, c.masking[9], c.rotate[9])
l, r = r, l^f2(r, c.masking[10], c.rotate[10])
l, r = r, l^f3(r, c.masking[11], c.rotate[11])
l, r = r, l^f1(r, c.masking[12], c.rotate[12])
l, r = r, l^f2(r, c.masking[13], c.rotate[13])
l, r = r, l^f3(r, c.masking[14], c.rotate[14])
l, r = r, l^f1(r, c.masking[15], c.rotate[15])
dst[0] = uint8(r >> 24)
dst[1] = uint8(r >> 16)
dst[2] = uint8(r >> 8)
dst[3] = uint8(r)
dst[4] = uint8(l >> 24)
dst[5] = uint8(l >> 16)
dst[6] = uint8(l >> 8)
dst[7] = uint8(l)
}
func (c *Cipher) Decrypt(dst, src []byte) {
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
l, r = r, l^f1(r, c.masking[15], c.rotate[15])
l, r = r, l^f3(r, c.masking[14], c.rotate[14])
l, r = r, l^f2(r, c.masking[13], c.rotate[13])
l, r = r, l^f1(r, c.masking[12], c.rotate[12])
l, r = r, l^f3(r, c.masking[11], c.rotate[11])
l, r = r, l^f2(r, c.masking[10], c.rotate[10])
l, r = r, l^f1(r, c.masking[9], c.rotate[9])
l, r = r, l^f3(r, c.masking[8], c.rotate[8])
l, r = r, l^f2(r, c.masking[7], c.rotate[7])
l, r = r, l^f1(r, c.masking[6], c.rotate[6])
l, r = r, l^f3(r, c.masking[5], c.rotate[5])
l, r = r, l^f2(r, c.masking[4], c.rotate[4])
l, r = r, l^f1(r, c.masking[3], c.rotate[3])
l, r = r, l^f3(r, c.masking[2], c.rotate[2])
l, r = r, l^f2(r, c.masking[1], c.rotate[1])
l, r = r, l^f1(r, c.masking[0], c.rotate[0])
dst[0] = uint8(r >> 24)
dst[1] = uint8(r >> 16)
dst[2] = uint8(r >> 8)
dst[3] = uint8(r)
dst[4] = uint8(l >> 24)
dst[5] = uint8(l >> 16)
dst[6] = uint8(l >> 8)
dst[7] = uint8(l)
}
type keyScheduleA [4][7]uint8
type keyScheduleB [4][5]uint8
// keyScheduleRound contains the magic values for a round of the key schedule.
// The keyScheduleA deals with the lines like:
// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8]
// Conceptually, both x and z are in the same array, x first. The first
// element describes which word of this array gets written to and the
// second, which word gets read. So, for the line above, it's "4, 0", because
// it's writing to the first word of z, which, being after x, is word 4, and
// reading from the first word of x: word 0.
//
// Next are the indexes into the S-boxes. Now the array is treated as bytes. So
// "xD" is 0xd. The first byte of z is written as "16 + 0", just to be clear
// that it's z that we're indexing.
//
// keyScheduleB deals with lines like:
// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2]
// "K1" is ignored because key words are always written in order. So the five
// elements are the S-box indexes. They use the same form as in keyScheduleA,
// above.
type keyScheduleRound struct{}
type keySchedule []keyScheduleRound
var schedule = []struct {
a keyScheduleA
b keyScheduleB
}{
{
keyScheduleA{
{4, 0, 0xd, 0xf, 0xc, 0xe, 0x8},
{5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa},
{6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9},
{7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb},
},
keyScheduleB{
{16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2},
{16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6},
{16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9},
{16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc},
},
},
{
keyScheduleA{
{0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0},
{1, 4, 0, 2, 1, 3, 16 + 2},
{2, 5, 7, 6, 5, 4, 16 + 1},
{3, 7, 0xa, 9, 0xb, 8, 16 + 3},
},
keyScheduleB{
{3, 2, 0xc, 0xd, 8},
{1, 0, 0xe, 0xf, 0xd},
{7, 6, 8, 9, 3},
{5, 4, 0xa, 0xb, 7},
},
},
{
keyScheduleA{
{4, 0, 0xd, 0xf, 0xc, 0xe, 8},
{5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa},
{6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9},
{7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb},
},
keyScheduleB{
{16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9},
{16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc},
{16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2},
{16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6},
},
},
{
keyScheduleA{
{0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0},
{1, 4, 0, 2, 1, 3, 16 + 2},
{2, 5, 7, 6, 5, 4, 16 + 1},
{3, 7, 0xa, 9, 0xb, 8, 16 + 3},
},
keyScheduleB{
{8, 9, 7, 6, 3},
{0xa, 0xb, 5, 4, 7},
{0xc, 0xd, 3, 2, 8},
{0xe, 0xf, 1, 0, 0xd},
},
},
}
func (c *Cipher) keySchedule(in []byte) {
var t [8]uint32
var k [32]uint32
for i := 0; i < 4; i++ {
j := i * 4
t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3])
}
x := []byte{6, 7, 4, 5}
ki := 0
for half := 0; half < 2; half++ {
for _, round := range schedule {
for j := 0; j < 4; j++ {
var a [7]uint8
copy(a[:], round.a[j][:])
w := t[a[1]]
w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff]
w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff]
w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff]
w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff]
w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff]
t[a[0]] = w
}
for j := 0; j < 4; j++ {
var b [5]uint8
copy(b[:], round.b[j][:])
w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff]
w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff]
w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff]
w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff]
w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff]
k[ki] = w
ki++
}
}
}
for i := 0; i < 16; i++ {
c.masking[i] = k[i]
c.rotate[i] = uint8(k[16+i] & 0x1f)
}
}
// These are the three 'f' functions. See RFC 2144, section 2.2.
func f1(d, m uint32, r uint8) uint32 {
t := m + d
I := (t << r) | (t >> (32 - r))
return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff]
}
func f2(d, m uint32, r uint8) uint32 {
t := m ^ d
I := (t << r) | (t >> (32 - r))
return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff]
}
func f3(d, m uint32, r uint8) uint32 {
t := m - d
I := (t << r) | (t >> (32 - r))
return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff]
}
var sBox = [8][256]uint32{
{
0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949,
0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e,
0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d,
0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0,
0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7,
0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935,
0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d,
0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50,
0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe,
0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3,
0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167,
0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291,
0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779,
0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2,
0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511,
0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d,
0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5,
0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324,
0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c,
0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc,
0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d,
0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96,
0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a,
0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d,
0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd,
0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6,
0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9,
0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872,
0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c,
0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e,
0xbd91e046, 0x9a56456e, 0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9,
0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf,
},
{
0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651,
0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3,
0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb,
0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806,
0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b,
0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359,
0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b,
0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c,
0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34,
0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb,
0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd,
0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860,
0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b,
0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304,
0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b,
0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf,
0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c,
0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13,
0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f,
0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6,
0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6,
0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58,
0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906,
0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d,
0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6,
0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4,
0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6,
0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f,
0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249,
0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa,
0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9,
0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1,
},
{
0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90,
0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5,
0x11107d9f, 0x07647db9, 0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e,
0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240,
0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5,
0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b,
0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71,
0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04,
0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82,
0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15,
0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2,
0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176,
0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148,
0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc,
0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341,
0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e,
0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51,
0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f,
0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a,
0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b,
0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b,
0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5,
0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45,
0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536,
0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc,
0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0,
0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69,
0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2,
0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49,
0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d,
0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a,
0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783,
},
{
0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1,
0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf,
0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15,
0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121,
0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25,
0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5,
0x2649abdf, 0xaea0c7f5, 0x36338cc1, 0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb,
0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5,
0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d,
0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6,
0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23,
0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003,
0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6,
0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119,
0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24,
0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a,
0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79,
0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df,
0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26,
0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab,
0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7,
0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417,
0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2,
0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2,
0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a,
0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919,
0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef,
0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876,
0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab,
0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04,
0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282,
0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2,
},
{
0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f,
0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a,
0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff,
0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02,
0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a,
0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7,
0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9,
0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981,
0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774,
0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655,
0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2,
0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910,
0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1,
0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da,
0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049,
0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f,
0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba,
0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be,
0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3,
0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840,
0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4,
0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2,
0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7,
0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5,
0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e,
0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e,
0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801,
0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad,
0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0,
0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20,
0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8,
0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4,
},
{
0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac,
0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138,
0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367,
0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98,
0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072,
0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3,
0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd,
0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8,
0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9,
0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54,
0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387,
0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc,
0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf,
0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf,
0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 0xa8dc8af0, 0x7345c106, 0xf41e232f,
0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289,
0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950,
0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f,
0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b,
0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be,
0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13,
0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976,
0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0,
0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891,
0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da,
0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc,
0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084,
0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25,
0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121,
0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5,
0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd,
0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f,
},
{
0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f,
0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de,
0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43,
0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19,
0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2,
0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516,
0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88,
0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816,
0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756,
0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a,
0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264,
0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688,
0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28,
0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3,
0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7,
0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06,
0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033,
0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a,
0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 0x488dcf25, 0x36c9d566,
0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509,
0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962,
0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e,
0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c,
0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c,
0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285,
0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301,
0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be,
0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767,
0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647,
0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914,
0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c,
0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3,
},
{
0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5,
0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc,
0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd,
0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d,
0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2,
0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862,
0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc,
0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c,
0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e,
0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039,
0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8,
0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42,
0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5,
0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472,
0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225,
0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c,
0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb,
0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054,
0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70,
0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc,
0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c,
0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3,
0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 0x2eda7fa4,
0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101,
0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f,
0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e,
0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a,
0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c,
0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384,
0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c,
0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82,
0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e,
},
}
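A small usage sketch (not part of the vendored file): CAST5 is a raw 8-byte block cipher, so callers encrypt one block at a time with a 16-byte key; real use would wrap it in a mode of operation.
package main

import (
    "bytes"
    "fmt"
    "log"

    "golang.org/x/crypto/cast5"
)

func main() {
    key := []byte("0123456789abcdef") // 16 bytes, illustrative only
    c, err := cast5.NewCipher(key)
    if err != nil {
        log.Fatal(err)
    }
    src := []byte("8 bytes!") // exactly one BlockSize block
    dst := make([]byte, cast5.BlockSize)
    c.Encrypt(dst, src)
    plain := make([]byte, cast5.BlockSize)
    c.Decrypt(plain, dst)
    fmt.Println(bytes.Equal(plain, src)) // true
}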

219
vendor/golang.org/x/crypto/openpgp/armor/armor.go generated vendored Normal file
View File

@ -0,0 +1,219 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is
// very similar to PEM except that it has an additional CRC checksum.
package armor // import "golang.org/x/crypto/openpgp/armor"
import (
"bufio"
"bytes"
"encoding/base64"
"golang.org/x/crypto/openpgp/errors"
"io"
)
// A Block represents an OpenPGP armored structure.
//
// The encoded form is:
// -----BEGIN Type-----
// Headers
//
// base64-encoded Bytes
// '=' base64 encoded checksum
// -----END Type-----
// where Headers is a possibly empty sequence of Key: Value lines.
//
// Since the armored data can be very large, this package presents a streaming
// interface.
type Block struct {
Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE").
Header map[string]string // Optional headers.
Body io.Reader // A Reader from which the contents can be read
lReader lineReader
oReader openpgpReader
}
var ArmorCorrupt error = errors.StructuralError("armor invalid")
const crc24Init = 0xb704ce
const crc24Poly = 0x1864cfb
const crc24Mask = 0xffffff
// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1
func crc24(crc uint32, d []byte) uint32 {
for _, b := range d {
crc ^= uint32(b) << 16
for i := 0; i < 8; i++ {
crc <<= 1
if crc&0x1000000 != 0 {
crc ^= crc24Poly
}
}
}
return crc
}
var armorStart = []byte("-----BEGIN ")
var armorEnd = []byte("-----END ")
var armorEndOfLine = []byte("-----")
// lineReader wraps a line based reader. It watches for the end of an armor
// block and records the expected CRC value.
type lineReader struct {
in *bufio.Reader
buf []byte
eof bool
crc uint32
}
func (l *lineReader) Read(p []byte) (n int, err error) {
if l.eof {
return 0, io.EOF
}
if len(l.buf) > 0 {
n = copy(p, l.buf)
l.buf = l.buf[n:]
return
}
line, isPrefix, err := l.in.ReadLine()
if err != nil {
return
}
if isPrefix {
return 0, ArmorCorrupt
}
if len(line) == 5 && line[0] == '=' {
// This is the checksum line
var expectedBytes [3]byte
var m int
m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:])
if m != 3 || err != nil {
return
}
l.crc = uint32(expectedBytes[0])<<16 |
uint32(expectedBytes[1])<<8 |
uint32(expectedBytes[2])
line, _, err = l.in.ReadLine()
if err != nil && err != io.EOF {
return
}
if !bytes.HasPrefix(line, armorEnd) {
return 0, ArmorCorrupt
}
l.eof = true
return 0, io.EOF
}
if len(line) > 96 {
return 0, ArmorCorrupt
}
n = copy(p, line)
bytesToSave := len(line) - n
if bytesToSave > 0 {
if cap(l.buf) < bytesToSave {
l.buf = make([]byte, 0, bytesToSave)
}
l.buf = l.buf[0:bytesToSave]
copy(l.buf, line[n:])
}
return
}
// openpgpReader passes Read calls to the underlying base64 decoder, but keeps
// a running CRC of the resulting data and checks the CRC against the value
// found by the lineReader at EOF.
type openpgpReader struct {
lReader *lineReader
b64Reader io.Reader
currentCRC uint32
}
func (r *openpgpReader) Read(p []byte) (n int, err error) {
n, err = r.b64Reader.Read(p)
r.currentCRC = crc24(r.currentCRC, p[:n])
if err == io.EOF {
if r.lReader.crc != uint32(r.currentCRC&crc24Mask) {
return 0, ArmorCorrupt
}
}
return
}
// Decode reads a PGP armored block from the given Reader. It will ignore
// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The
// given Reader is not usable after calling this function: an arbitrary amount
// of data may have been read past the end of the block.
func Decode(in io.Reader) (p *Block, err error) {
r := bufio.NewReaderSize(in, 100)
var line []byte
ignoreNext := false
TryNextBlock:
p = nil
// Skip leading garbage
for {
ignoreThis := ignoreNext
line, ignoreNext, err = r.ReadLine()
if err != nil {
return
}
if ignoreNext || ignoreThis {
continue
}
line = bytes.TrimSpace(line)
if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) {
break
}
}
p = new(Block)
p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)])
p.Header = make(map[string]string)
nextIsContinuation := false
var lastKey string
// Read headers
for {
isContinuation := nextIsContinuation
line, nextIsContinuation, err = r.ReadLine()
if err != nil {
p = nil
return
}
if isContinuation {
p.Header[lastKey] += string(line)
continue
}
line = bytes.TrimSpace(line)
if len(line) == 0 {
break
}
i := bytes.Index(line, []byte(": "))
if i == -1 {
goto TryNextBlock
}
lastKey = string(line[:i])
p.Header[lastKey] = string(line[i+2:])
}
p.lReader.in = r
p.oReader.currentCRC = crc24Init
p.oReader.lReader = &p.lReader
p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader)
p.Body = &p.oReader
return
}
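A small helper sketch (not part of the vendored file; the package and function names are illustrative) showing the intended call pattern: Decode yields the block type, headers, and a streaming Body whose CRC is verified as it is read.
package example

import (
    "io"
    "io/ioutil"

    "golang.org/x/crypto/openpgp/armor"
)

// readArmored decodes one armored block from r and returns its type and body.
func readArmored(r io.Reader) (string, []byte, error) {
    block, err := armor.Decode(r)
    if err != nil {
        return "", nil, err
    }
    // CRC mismatches surface here as ArmorCorrupt while the body streams.
    body, err := ioutil.ReadAll(block.Body)
    if err != nil {
        return "", nil, err
    }
    return block.Type, body, nil
}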

160
vendor/golang.org/x/crypto/openpgp/armor/encode.go generated vendored Normal file
View File

@ -0,0 +1,160 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package armor
import (
"encoding/base64"
"io"
)
var armorHeaderSep = []byte(": ")
var blockEnd = []byte("\n=")
var newline = []byte("\n")
var armorEndOfLineOut = []byte("-----\n")
// writeSlices writes its arguments to the given Writer.
func writeSlices(out io.Writer, slices ...[]byte) (err error) {
for _, s := range slices {
_, err = out.Write(s)
if err != nil {
return err
}
}
return
}
// lineBreaker breaks data across several lines, all of the same byte length
// (except possibly the last). Lines are broken with a single '\n'.
type lineBreaker struct {
lineLength int
line []byte
used int
out io.Writer
haveWritten bool
}
func newLineBreaker(out io.Writer, lineLength int) *lineBreaker {
return &lineBreaker{
lineLength: lineLength,
line: make([]byte, lineLength),
used: 0,
out: out,
}
}
func (l *lineBreaker) Write(b []byte) (n int, err error) {
n = len(b)
if n == 0 {
return
}
if l.used == 0 && l.haveWritten {
_, err = l.out.Write([]byte{'\n'})
if err != nil {
return
}
}
if l.used+len(b) < l.lineLength {
l.used += copy(l.line[l.used:], b)
return
}
l.haveWritten = true
_, err = l.out.Write(l.line[0:l.used])
if err != nil {
return
}
excess := l.lineLength - l.used
l.used = 0
_, err = l.out.Write(b[0:excess])
if err != nil {
return
}
_, err = l.Write(b[excess:])
return
}
func (l *lineBreaker) Close() (err error) {
if l.used > 0 {
_, err = l.out.Write(l.line[0:l.used])
if err != nil {
return
}
}
return
}
// encoding keeps track of a running CRC24 over the data which has been written
// to it and outputs an OpenPGP checksum when closed, followed by an armor
// trailer.
//
// It's built into a stack of io.Writers:
// encoding -> base64 encoder -> lineBreaker -> out
type encoding struct {
out io.Writer
breaker *lineBreaker
b64 io.WriteCloser
crc uint32
blockType []byte
}
func (e *encoding) Write(data []byte) (n int, err error) {
e.crc = crc24(e.crc, data)
return e.b64.Write(data)
}
func (e *encoding) Close() (err error) {
err = e.b64.Close()
if err != nil {
return
}
e.breaker.Close()
var checksumBytes [3]byte
checksumBytes[0] = byte(e.crc >> 16)
checksumBytes[1] = byte(e.crc >> 8)
checksumBytes[2] = byte(e.crc)
var b64ChecksumBytes [4]byte
base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:])
return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine)
}
// Encode returns a WriteCloser which will encode the data written to it in
// OpenPGP armor.
func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) {
bType := []byte(blockType)
err = writeSlices(out, armorStart, bType, armorEndOfLineOut)
if err != nil {
return
}
for k, v := range headers {
err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline)
if err != nil {
return
}
}
_, err = out.Write(newline)
if err != nil {
return
}
e := &encoding{
out: out,
breaker: newLineBreaker(out, 64),
crc: crc24Init,
blockType: bType,
}
e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker)
return e, nil
}
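An encoding sketch (not part of the vendored file): write plaintext through the returned WriteCloser and Close it so the checksum line and trailer are emitted; the block type and header shown here are illustrative.
package main

import (
    "log"
    "os"

    "golang.org/x/crypto/openpgp/armor"
)

func main() {
    w, err := armor.Encode(os.Stdout, "PGP MESSAGE", map[string]string{"Comment": "example"})
    if err != nil {
        log.Fatal(err)
    }
    if _, err := w.Write([]byte("hello, armor")); err != nil {
        log.Fatal(err)
    }
    // Close flushes the base64 encoder and appends the CRC24 line and the END trailer.
    if err := w.Close(); err != nil {
        log.Fatal(err)
    }
}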

59
vendor/golang.org/x/crypto/openpgp/canonical_text.go generated vendored Normal file
View File

@ -0,0 +1,59 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package openpgp
import "hash"
// NewCanonicalTextHash reformats text written to it into the canonical
// form and then applies the hash h. See RFC 4880, section 5.2.1.
func NewCanonicalTextHash(h hash.Hash) hash.Hash {
return &canonicalTextHash{h, 0}
}
type canonicalTextHash struct {
h hash.Hash
s int
}
var newline = []byte{'\r', '\n'}
func (cth *canonicalTextHash) Write(buf []byte) (int, error) {
start := 0
for i, c := range buf {
switch cth.s {
case 0:
if c == '\r' {
cth.s = 1
} else if c == '\n' {
cth.h.Write(buf[start:i])
cth.h.Write(newline)
start = i + 1
}
case 1:
cth.s = 0
}
}
cth.h.Write(buf[start:])
return len(buf), nil
}
func (cth *canonicalTextHash) Sum(in []byte) []byte {
return cth.h.Sum(in)
}
func (cth *canonicalTextHash) Reset() {
cth.h.Reset()
cth.s = 0
}
func (cth *canonicalTextHash) Size() int {
return cth.h.Size()
}
func (cth *canonicalTextHash) BlockSize() int {
return cth.h.BlockSize()
}
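A short sketch (not part of the vendored file): two texts that differ only in line endings hash to the same digest once wrapped in the canonical-text hash, since lone "\n" is rewritten to "\r\n" before hashing.
package main

import (
    "bytes"
    "crypto/sha256"
    "fmt"

    "golang.org/x/crypto/openpgp"
)

func main() {
    unix := openpgp.NewCanonicalTextHash(sha256.New())
    unix.Write([]byte("line one\nline two\n"))

    dos := openpgp.NewCanonicalTextHash(sha256.New())
    dos.Write([]byte("line one\r\nline two\r\n"))

    fmt.Println(bytes.Equal(unix.Sum(nil), dos.Sum(nil))) // true
}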

122
vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go generated vendored Normal file
View File

@ -0,0 +1,122 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package elgamal implements ElGamal encryption, suitable for OpenPGP,
// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on
// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31,
// n. 4, 1985, pp. 469-472.
//
// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it
// unsuitable for other protocols. RSA should be used in preference in any
// case.
package elgamal // import "golang.org/x/crypto/openpgp/elgamal"
import (
"crypto/rand"
"crypto/subtle"
"errors"
"io"
"math/big"
)
// PublicKey represents an ElGamal public key.
type PublicKey struct {
G, P, Y *big.Int
}
// PrivateKey represents an ElGamal private key.
type PrivateKey struct {
PublicKey
X *big.Int
}
// Encrypt encrypts the given message to the given public key. The result is a
// pair of integers. Errors can result from reading random, or because msg is
// too large to be encrypted to the public key.
func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) {
pLen := (pub.P.BitLen() + 7) / 8
if len(msg) > pLen-11 {
err = errors.New("elgamal: message too long")
return
}
// EM = 0x02 || PS || 0x00 || M
em := make([]byte, pLen-1)
em[0] = 2
ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):]
err = nonZeroRandomBytes(ps, random)
if err != nil {
return
}
em[len(em)-len(msg)-1] = 0
copy(mm, msg)
m := new(big.Int).SetBytes(em)
k, err := rand.Int(random, pub.P)
if err != nil {
return
}
c1 = new(big.Int).Exp(pub.G, k, pub.P)
s := new(big.Int).Exp(pub.Y, k, pub.P)
c2 = s.Mul(s, m)
c2.Mod(c2, pub.P)
return
}
// Decrypt takes two integers, resulting from an ElGamal encryption, and
// returns the plaintext of the message. An error can result only if the
// ciphertext is invalid. Users should keep in mind that this is a padding
// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can
// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks
// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel
// Bleichenbacher, Advances in Cryptology (Crypto '98).
func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) {
s := new(big.Int).Exp(c1, priv.X, priv.P)
s.ModInverse(s, priv.P)
s.Mul(s, c2)
s.Mod(s, priv.P)
em := s.Bytes()
firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2)
// The remainder of the plaintext must be a string of non-zero random
// octets, followed by a 0, followed by the message.
// lookingForIndex: 1 iff we are still looking for the zero.
// index: the offset of the first zero byte.
var lookingForIndex, index int
lookingForIndex = 1
for i := 1; i < len(em); i++ {
equals0 := subtle.ConstantTimeByteEq(em[i], 0)
index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index)
lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex)
}
if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 {
return nil, errors.New("elgamal: decryption error")
}
return em[index+1:], nil
}
// nonZeroRandomBytes fills the given slice with non-zero random octets.
func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) {
_, err = io.ReadFull(rand, s)
if err != nil {
return
}
for i := 0; i < len(s); i++ {
for s[i] == 0 {
_, err = io.ReadFull(rand, s[i:i+1])
if err != nil {
return
}
}
}
return
}
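A round-trip sketch (not part of the vendored file; the package and helper names are illustrative). Generating or loading a valid ElGamal key pair is out of scope here, so the helper assumes priv already holds workable group parameters.
package example

import (
    "crypto/rand"

    "golang.org/x/crypto/openpgp/elgamal"
)

// roundTrip encrypts msg to priv's public half and decrypts it again.
// msg must be short enough for the key's modulus (len(msg) <= pLen-11).
func roundTrip(priv *elgamal.PrivateKey, msg []byte) ([]byte, error) {
    c1, c2, err := elgamal.Encrypt(rand.Reader, &priv.PublicKey, msg)
    if err != nil {
        return nil, err
    }
    return elgamal.Decrypt(priv, c1, c2)
}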

72
vendor/golang.org/x/crypto/openpgp/errors/errors.go generated vendored Normal file
View File

@ -0,0 +1,72 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package errors contains common error types for the OpenPGP packages.
package errors // import "golang.org/x/crypto/openpgp/errors"
import (
"strconv"
)
// A StructuralError is returned when OpenPGP data is found to be syntactically
// invalid.
type StructuralError string
func (s StructuralError) Error() string {
return "openpgp: invalid data: " + string(s)
}
// UnsupportedError indicates that, although the OpenPGP data is valid, it
// makes use of currently unimplemented features.
type UnsupportedError string
func (s UnsupportedError) Error() string {
return "openpgp: unsupported feature: " + string(s)
}
// InvalidArgumentError indicates that the caller is in error and passed an
// incorrect value.
type InvalidArgumentError string
func (i InvalidArgumentError) Error() string {
return "openpgp: invalid argument: " + string(i)
}
// SignatureError indicates that a syntactically valid signature failed to
// validate.
type SignatureError string
func (b SignatureError) Error() string {
return "openpgp: invalid signature: " + string(b)
}
type keyIncorrectError int
func (ki keyIncorrectError) Error() string {
return "openpgp: incorrect key"
}
var ErrKeyIncorrect error = keyIncorrectError(0)
type unknownIssuerError int
func (unknownIssuerError) Error() string {
return "openpgp: signature made by unknown entity"
}
var ErrUnknownIssuer error = unknownIssuerError(0)
type keyRevokedError int
func (keyRevokedError) Error() string {
return "openpgp: signature made by revoked key"
}
var ErrKeyRevoked error = keyRevokedError(0)
type UnknownPacketTypeError uint8
func (upte UnknownPacketTypeError) Error() string {
return "openpgp: unknown packet type: " + strconv.Itoa(int(upte))
}
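
Editor's note: a small sketch of how callers typically distinguish these error types; the classify helper and the example message are illustrative, not part of the package.

package main

import (
	"fmt"

	openpgperrors "golang.org/x/crypto/openpgp/errors"
)

// classify maps the exported error types above onto short descriptions.
func classify(err error) string {
	switch err.(type) {
	case openpgperrors.StructuralError:
		return "malformed OpenPGP data"
	case openpgperrors.UnsupportedError:
		return "valid but unsupported feature"
	case openpgperrors.SignatureError:
		return "signature failed to validate"
	case openpgperrors.InvalidArgumentError:
		return "caller error"
	default:
		if err == openpgperrors.ErrKeyIncorrect {
			return "message not encrypted to any of our keys"
		}
		return "other error"
	}
}

func main() {
	fmt.Println(classify(openpgperrors.UnsupportedError("public key type 99")))
}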

641
vendor/golang.org/x/crypto/openpgp/keys.go generated vendored Normal file

@ -0,0 +1,641 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package openpgp
import (
"crypto/rsa"
"io"
"time"
"golang.org/x/crypto/openpgp/armor"
"golang.org/x/crypto/openpgp/errors"
"golang.org/x/crypto/openpgp/packet"
)
// PublicKeyType is the armor type for a PGP public key.
var PublicKeyType = "PGP PUBLIC KEY BLOCK"
// PrivateKeyType is the armor type for a PGP private key.
var PrivateKeyType = "PGP PRIVATE KEY BLOCK"
// An Entity represents the components of an OpenPGP key: a primary public key
// (which must be a signing key), one or more identities claimed by that key,
// and zero or more subkeys, which may be encryption keys.
type Entity struct {
PrimaryKey *packet.PublicKey
PrivateKey *packet.PrivateKey
Identities map[string]*Identity // indexed by Identity.Name
Revocations []*packet.Signature
Subkeys []Subkey
}
// An Identity represents an identity claimed by an Entity and zero or more
// assertions by other entities about that claim.
type Identity struct {
Name string // by convention, has the form "Full Name (comment) <email@example.com>"
UserId *packet.UserId
SelfSignature *packet.Signature
Signatures []*packet.Signature
}
// A Subkey is an additional public key in an Entity. Subkeys can be used for
// encryption.
type Subkey struct {
PublicKey *packet.PublicKey
PrivateKey *packet.PrivateKey
Sig *packet.Signature
}
// A Key identifies a specific public key in an Entity. This is either the
// Entity's primary key or a subkey.
type Key struct {
Entity *Entity
PublicKey *packet.PublicKey
PrivateKey *packet.PrivateKey
SelfSignature *packet.Signature
}
// A KeyRing provides access to public and private keys.
type KeyRing interface {
// KeysById returns the set of keys that have the given key id.
KeysById(id uint64) []Key
// KeysByIdAndUsage returns the set of keys with the given id
// that also meet the key usage given by requiredUsage.
// The requiredUsage is expressed as the bitwise-OR of
// packet.KeyFlag* values.
KeysByIdUsage(id uint64, requiredUsage byte) []Key
// DecryptionKeys returns all private keys that are valid for
// decryption.
DecryptionKeys() []Key
}
// primaryIdentity returns the Identity marked as primary or the first identity
// if none are so marked.
func (e *Entity) primaryIdentity() *Identity {
var firstIdentity *Identity
for _, ident := range e.Identities {
if firstIdentity == nil {
firstIdentity = ident
}
if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
return ident
}
}
return firstIdentity
}
// encryptionKey returns the best candidate Key for encrypting a message to the
// given Entity.
func (e *Entity) encryptionKey(now time.Time) (Key, bool) {
candidateSubkey := -1
// Iterate the keys to find the newest key
var maxTime time.Time
for i, subkey := range e.Subkeys {
if subkey.Sig.FlagsValid &&
subkey.Sig.FlagEncryptCommunications &&
subkey.PublicKey.PubKeyAlgo.CanEncrypt() &&
!subkey.Sig.KeyExpired(now) &&
(maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) {
candidateSubkey = i
maxTime = subkey.Sig.CreationTime
}
}
if candidateSubkey != -1 {
subkey := e.Subkeys[candidateSubkey]
return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
}
// If we don't have any candidate subkeys for encryption and
// the primary key doesn't have any usage metadata then we
// assume that the primary key is ok. Or, if the primary key is
// marked as ok to encrypt to, then we can obviously use it.
i := e.primaryIdentity()
if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications &&
e.PrimaryKey.PubKeyAlgo.CanEncrypt() &&
!i.SelfSignature.KeyExpired(now) {
return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
}
// This Entity appears to be signing only.
return Key{}, false
}
// signingKey return the best candidate Key for signing a message with this
// Entity.
func (e *Entity) signingKey(now time.Time) (Key, bool) {
candidateSubkey := -1
for i, subkey := range e.Subkeys {
if subkey.Sig.FlagsValid &&
subkey.Sig.FlagSign &&
subkey.PublicKey.PubKeyAlgo.CanSign() &&
!subkey.Sig.KeyExpired(now) {
candidateSubkey = i
break
}
}
if candidateSubkey != -1 {
subkey := e.Subkeys[candidateSubkey]
return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
}
// If we have no candidate subkey then we assume that it's ok to sign
// with the primary key.
i := e.primaryIdentity()
if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign &&
!i.SelfSignature.KeyExpired(now) {
return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
}
return Key{}, false
}
// An EntityList contains one or more Entities.
type EntityList []*Entity
// KeysById returns the set of keys that have the given key id.
func (el EntityList) KeysById(id uint64) (keys []Key) {
for _, e := range el {
if e.PrimaryKey.KeyId == id {
var selfSig *packet.Signature
for _, ident := range e.Identities {
if selfSig == nil {
selfSig = ident.SelfSignature
} else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
selfSig = ident.SelfSignature
break
}
}
keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig})
}
for _, subKey := range e.Subkeys {
if subKey.PublicKey.KeyId == id {
keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig})
}
}
}
return
}
// KeysByIdAndUsage returns the set of keys with the given id that also meet
// the key usage given by requiredUsage. The requiredUsage is expressed as
// the bitwise-OR of packet.KeyFlag* values.
func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) {
for _, key := range el.KeysById(id) {
if len(key.Entity.Revocations) > 0 {
continue
}
if key.SelfSignature.RevocationReason != nil {
continue
}
if key.SelfSignature.FlagsValid && requiredUsage != 0 {
var usage byte
if key.SelfSignature.FlagCertify {
usage |= packet.KeyFlagCertify
}
if key.SelfSignature.FlagSign {
usage |= packet.KeyFlagSign
}
if key.SelfSignature.FlagEncryptCommunications {
usage |= packet.KeyFlagEncryptCommunications
}
if key.SelfSignature.FlagEncryptStorage {
usage |= packet.KeyFlagEncryptStorage
}
if usage&requiredUsage != requiredUsage {
continue
}
}
keys = append(keys, key)
}
return
}
// DecryptionKeys returns all private keys that are valid for decryption.
func (el EntityList) DecryptionKeys() (keys []Key) {
for _, e := range el {
for _, subKey := range e.Subkeys {
if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) {
keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig})
}
}
}
return
}
// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file.
func ReadArmoredKeyRing(r io.Reader) (EntityList, error) {
block, err := armor.Decode(r)
if err == io.EOF {
return nil, errors.InvalidArgumentError("no armored data found")
}
if err != nil {
return nil, err
}
if block.Type != PublicKeyType && block.Type != PrivateKeyType {
return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type)
}
return ReadKeyRing(block.Body)
}
// ReadKeyRing reads one or more public/private keys. Unsupported keys are
// ignored as long as at least a single valid key is found.
func ReadKeyRing(r io.Reader) (el EntityList, err error) {
packets := packet.NewReader(r)
var lastUnsupportedError error
for {
var e *Entity
e, err = ReadEntity(packets)
if err != nil {
// TODO: warn about skipped unsupported/unreadable keys
if _, ok := err.(errors.UnsupportedError); ok {
lastUnsupportedError = err
err = readToNextPublicKey(packets)
} else if _, ok := err.(errors.StructuralError); ok {
// Skip unreadable, badly-formatted keys
lastUnsupportedError = err
err = readToNextPublicKey(packets)
}
if err == io.EOF {
err = nil
break
}
if err != nil {
el = nil
break
}
} else {
el = append(el, e)
}
}
if len(el) == 0 && err == nil {
err = lastUnsupportedError
}
return
}
// readToNextPublicKey reads packets until the start of the entity and leaves
// the first packet of the new entity in the Reader.
func readToNextPublicKey(packets *packet.Reader) (err error) {
var p packet.Packet
for {
p, err = packets.Next()
if err == io.EOF {
return
} else if err != nil {
if _, ok := err.(errors.UnsupportedError); ok {
err = nil
continue
}
return
}
if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey {
packets.Unread(p)
return
}
}
}
// ReadEntity reads an entity (public key, identities, subkeys etc) from the
// given Reader.
func ReadEntity(packets *packet.Reader) (*Entity, error) {
e := new(Entity)
e.Identities = make(map[string]*Identity)
p, err := packets.Next()
if err != nil {
return nil, err
}
var ok bool
if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok {
if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok {
packets.Unread(p)
return nil, errors.StructuralError("first packet was not a public/private key")
}
e.PrimaryKey = &e.PrivateKey.PublicKey
}
if !e.PrimaryKey.PubKeyAlgo.CanSign() {
return nil, errors.StructuralError("primary key cannot be used for signatures")
}
var current *Identity
var revocations []*packet.Signature
EachPacket:
for {
p, err := packets.Next()
if err == io.EOF {
break
} else if err != nil {
return nil, err
}
switch pkt := p.(type) {
case *packet.UserId:
current = new(Identity)
current.Name = pkt.Id
current.UserId = pkt
e.Identities[pkt.Id] = current
for {
p, err = packets.Next()
if err == io.EOF {
return nil, io.ErrUnexpectedEOF
} else if err != nil {
return nil, err
}
sig, ok := p.(*packet.Signature)
if !ok {
return nil, errors.StructuralError("user ID packet not followed by self-signature")
}
if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId {
if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil {
return nil, errors.StructuralError("user ID self-signature invalid: " + err.Error())
}
current.SelfSignature = sig
break
}
current.Signatures = append(current.Signatures, sig)
}
case *packet.Signature:
if pkt.SigType == packet.SigTypeKeyRevocation {
revocations = append(revocations, pkt)
} else if pkt.SigType == packet.SigTypeDirectSignature {
// TODO: RFC4880 5.2.1 permits signatures
// directly on keys (eg. to bind additional
// revocation keys).
} else if current == nil {
return nil, errors.StructuralError("signature packet found before user id packet")
} else {
current.Signatures = append(current.Signatures, pkt)
}
case *packet.PrivateKey:
if pkt.IsSubkey == false {
packets.Unread(p)
break EachPacket
}
err = addSubkey(e, packets, &pkt.PublicKey, pkt)
if err != nil {
return nil, err
}
case *packet.PublicKey:
if pkt.IsSubkey == false {
packets.Unread(p)
break EachPacket
}
err = addSubkey(e, packets, pkt, nil)
if err != nil {
return nil, err
}
default:
// we ignore unknown packets
}
}
if len(e.Identities) == 0 {
return nil, errors.StructuralError("entity without any identities")
}
for _, revocation := range revocations {
err = e.PrimaryKey.VerifyRevocationSignature(revocation)
if err == nil {
e.Revocations = append(e.Revocations, revocation)
} else {
// TODO: RFC 4880 5.2.3.15 defines revocation keys.
return nil, errors.StructuralError("revocation signature signed by alternate key")
}
}
return e, nil
}
func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error {
var subKey Subkey
subKey.PublicKey = pub
subKey.PrivateKey = priv
p, err := packets.Next()
if err == io.EOF {
return io.ErrUnexpectedEOF
}
if err != nil {
return errors.StructuralError("subkey signature invalid: " + err.Error())
}
var ok bool
subKey.Sig, ok = p.(*packet.Signature)
if !ok {
return errors.StructuralError("subkey packet not followed by signature")
}
if subKey.Sig.SigType != packet.SigTypeSubkeyBinding && subKey.Sig.SigType != packet.SigTypeSubkeyRevocation {
return errors.StructuralError("subkey signature with wrong type")
}
err = e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, subKey.Sig)
if err != nil {
return errors.StructuralError("subkey signature invalid: " + err.Error())
}
e.Subkeys = append(e.Subkeys, subKey)
return nil
}
const defaultRSAKeyBits = 2048
// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a
// single identity composed of the given full name, comment and email, any of
// which may be empty but must not contain any of "()<>\x00".
// If config is nil, sensible defaults will be used.
func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) {
currentTime := config.Now()
bits := defaultRSAKeyBits
if config != nil && config.RSABits != 0 {
bits = config.RSABits
}
uid := packet.NewUserId(name, comment, email)
if uid == nil {
return nil, errors.InvalidArgumentError("user id field contained invalid characters")
}
signingPriv, err := rsa.GenerateKey(config.Random(), bits)
if err != nil {
return nil, err
}
encryptingPriv, err := rsa.GenerateKey(config.Random(), bits)
if err != nil {
return nil, err
}
e := &Entity{
PrimaryKey: packet.NewRSAPublicKey(currentTime, &signingPriv.PublicKey),
PrivateKey: packet.NewRSAPrivateKey(currentTime, signingPriv),
Identities: make(map[string]*Identity),
}
isPrimaryId := true
e.Identities[uid.Id] = &Identity{
Name: uid.Id,
UserId: uid,
SelfSignature: &packet.Signature{
CreationTime: currentTime,
SigType: packet.SigTypePositiveCert,
PubKeyAlgo: packet.PubKeyAlgoRSA,
Hash: config.Hash(),
IsPrimaryId: &isPrimaryId,
FlagsValid: true,
FlagSign: true,
FlagCertify: true,
IssuerKeyId: &e.PrimaryKey.KeyId,
},
}
// If the user passes in a DefaultHash via packet.Config,
// set the PreferredHash for the SelfSignature.
if config != nil && config.DefaultHash != 0 {
e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)}
}
// Likewise for DefaultCipher.
if config != nil && config.DefaultCipher != 0 {
e.Identities[uid.Id].SelfSignature.PreferredSymmetric = []uint8{uint8(config.DefaultCipher)}
}
e.Subkeys = make([]Subkey, 1)
e.Subkeys[0] = Subkey{
PublicKey: packet.NewRSAPublicKey(currentTime, &encryptingPriv.PublicKey),
PrivateKey: packet.NewRSAPrivateKey(currentTime, encryptingPriv),
Sig: &packet.Signature{
CreationTime: currentTime,
SigType: packet.SigTypeSubkeyBinding,
PubKeyAlgo: packet.PubKeyAlgoRSA,
Hash: config.Hash(),
FlagsValid: true,
FlagEncryptStorage: true,
FlagEncryptCommunications: true,
IssuerKeyId: &e.PrimaryKey.KeyId,
},
}
e.Subkeys[0].PublicKey.IsSubkey = true
e.Subkeys[0].PrivateKey.IsSubkey = true
return e, nil
}
// SerializePrivate serializes an Entity, including private key material, to
// the given Writer. For now, it must only be used on an Entity returned from
// NewEntity.
// If config is nil, sensible defaults will be used.
func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) {
err = e.PrivateKey.Serialize(w)
if err != nil {
return
}
for _, ident := range e.Identities {
err = ident.UserId.Serialize(w)
if err != nil {
return
}
err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config)
if err != nil {
return
}
err = ident.SelfSignature.Serialize(w)
if err != nil {
return
}
}
for _, subkey := range e.Subkeys {
err = subkey.PrivateKey.Serialize(w)
if err != nil {
return
}
err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config)
if err != nil {
return
}
err = subkey.Sig.Serialize(w)
if err != nil {
return
}
}
return nil
}
// Serialize writes the public part of the given Entity to w. (No private
// key material will be output).
func (e *Entity) Serialize(w io.Writer) error {
err := e.PrimaryKey.Serialize(w)
if err != nil {
return err
}
for _, ident := range e.Identities {
err = ident.UserId.Serialize(w)
if err != nil {
return err
}
err = ident.SelfSignature.Serialize(w)
if err != nil {
return err
}
for _, sig := range ident.Signatures {
err = sig.Serialize(w)
if err != nil {
return err
}
}
}
for _, subkey := range e.Subkeys {
err = subkey.PublicKey.Serialize(w)
if err != nil {
return err
}
err = subkey.Sig.Serialize(w)
if err != nil {
return err
}
}
return nil
}
// SignIdentity adds a signature to e, from signer, attesting that identity is
// associated with e. The provided identity must already be an element of
// e.Identities and the private key of signer must have been decrypted if
// necessary.
// If config is nil, sensible defaults will be used.
func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error {
if signer.PrivateKey == nil {
return errors.InvalidArgumentError("signing Entity must have a private key")
}
if signer.PrivateKey.Encrypted {
return errors.InvalidArgumentError("signing Entity's private key must be decrypted")
}
ident, ok := e.Identities[identity]
if !ok {
return errors.InvalidArgumentError("given identity string not found in Entity")
}
sig := &packet.Signature{
SigType: packet.SigTypeGenericCert,
PubKeyAlgo: signer.PrivateKey.PubKeyAlgo,
Hash: config.Hash(),
CreationTime: config.Now(),
IssuerKeyId: &signer.PrivateKey.KeyId,
}
if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil {
return err
}
ident.Signatures = append(ident.Signatures, sig)
return nil
}
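
Editor's note: a hedged end-to-end sketch of the keyring API above — create an entity with NewEntity, serialize it privately, then read it back with ReadKeyRing. The name and email strings are placeholders, and the blank crypto/sha256 import is an assumption to make sure the default signing hash is registered.

package main

import (
	"bytes"
	_ "crypto/sha256" // ensure the default SHA-256 hash is registered for signing
	"fmt"

	"golang.org/x/crypto/openpgp"
)

func main() {
	// Fresh RSA/RSA entity with default (2048-bit) parameters.
	e, err := openpgp.NewEntity("Alice Example", "", "alice@example.com", nil)
	if err != nil {
		panic(err)
	}

	// Round trip: serialize the private keyring, then parse it again.
	var buf bytes.Buffer
	if err := e.SerializePrivate(&buf, nil); err != nil {
		panic(err)
	}
	el, err := openpgp.ReadKeyRing(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("primary key %X: %d identity, %d subkey\n",
		el[0].PrimaryKey.KeyId, len(el[0].Identities), len(el[0].Subkeys))
}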

123
vendor/golang.org/x/crypto/openpgp/packet/compressed.go generated vendored Normal file

@ -0,0 +1,123 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"compress/bzip2"
"compress/flate"
"compress/zlib"
"golang.org/x/crypto/openpgp/errors"
"io"
"strconv"
)
// Compressed represents a compressed OpenPGP packet. The decompressed contents
// will contain more OpenPGP packets. See RFC 4880, section 5.6.
type Compressed struct {
Body io.Reader
}
const (
NoCompression = flate.NoCompression
BestSpeed = flate.BestSpeed
BestCompression = flate.BestCompression
DefaultCompression = flate.DefaultCompression
)
// CompressionConfig contains compressor configuration settings.
type CompressionConfig struct {
// Level is the compression level to use. It must be set to
// between -1 and 9, with -1 causing the compressor to use the
// default compression level, 0 causing the compressor to use
// no compression and 1 to 9 representing increasing (better,
// slower) compression levels. If Level is less than -1 or
// more than 9, a non-nil error will be returned during
// encryption. See the constants above for convenient common
// settings for Level.
Level int
}
func (c *Compressed) parse(r io.Reader) error {
var buf [1]byte
_, err := readFull(r, buf[:])
if err != nil {
return err
}
switch buf[0] {
case 1:
c.Body = flate.NewReader(r)
case 2:
c.Body, err = zlib.NewReader(r)
case 3:
c.Body = bzip2.NewReader(r)
default:
err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0])))
}
return err
}
// compressedWriteCloser represents the serialized compression stream
// header and the compressor. Its Close() method ensures that both the
// compressor and serialized stream header are closed. Its Write()
// method writes to the compressor.
type compressedWriteCloser struct {
sh io.Closer // Stream Header
c io.WriteCloser // Compressor
}
func (cwc compressedWriteCloser) Write(p []byte) (int, error) {
return cwc.c.Write(p)
}
func (cwc compressedWriteCloser) Close() (err error) {
err = cwc.c.Close()
if err != nil {
return err
}
return cwc.sh.Close()
}
// SerializeCompressed serializes a compressed data packet to w and
// returns a WriteCloser to which the literal data packets themselves
// can be written and which MUST be closed on completion. If cc is
// nil, sensible defaults will be used to configure the compression
// algorithm.
func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) {
compressed, err := serializeStreamHeader(w, packetTypeCompressed)
if err != nil {
return
}
_, err = compressed.Write([]byte{uint8(algo)})
if err != nil {
return
}
level := DefaultCompression
if cc != nil {
level = cc.Level
}
var compressor io.WriteCloser
switch algo {
case CompressionZIP:
compressor, err = flate.NewWriter(compressed, level)
case CompressionZLIB:
compressor, err = zlib.NewWriterLevel(compressed, level)
default:
s := strconv.Itoa(int(algo))
err = errors.UnsupportedError("Unsupported compression algorithm: " + s)
}
if err != nil {
return
}
literaldata = compressedWriteCloser{compressed, compressor}
return
}
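
Editor's note: a minimal round trip through SerializeCompressed and Read (sketch only; the nopCloser adapter and the sample string are illustrative helpers, not part of the package).

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"

	"golang.org/x/crypto/openpgp/packet"
)

// nopCloser turns a bytes.Buffer into the io.WriteCloser SerializeCompressed expects.
type nopCloser struct{ io.Writer }

func (nopCloser) Close() error { return nil }

func main() {
	var buf bytes.Buffer
	w, err := packet.SerializeCompressed(nopCloser{&buf}, packet.CompressionZLIB, nil)
	if err != nil {
		panic(err)
	}
	io.WriteString(w, "hello, compressed packet")
	w.Close() // MUST be closed so the final partial-length chunk is written

	p, err := packet.Read(&buf)
	if err != nil {
		panic(err)
	}
	c := p.(*packet.Compressed)
	body, _ := ioutil.ReadAll(c.Body) // Body yields the decompressed contents
	fmt.Printf("%s\n", body)
}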

91
vendor/golang.org/x/crypto/openpgp/packet/config.go generated vendored Normal file

@ -0,0 +1,91 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"crypto"
"crypto/rand"
"io"
"time"
)
// Config collects a number of parameters along with sensible defaults.
// A nil *Config is valid and results in all default values.
type Config struct {
// Rand provides the source of entropy.
// If nil, the crypto/rand Reader is used.
Rand io.Reader
// DefaultHash is the default hash function to be used.
// If zero, SHA-256 is used.
DefaultHash crypto.Hash
// DefaultCipher is the cipher to be used.
// If zero, AES-128 is used.
DefaultCipher CipherFunction
// Time returns the current time as the number of seconds since the
// epoch. If Time is nil, time.Now is used.
Time func() time.Time
// DefaultCompressionAlgo is the compression algorithm to be
// applied to the plaintext before encryption. If zero, no
// compression is done.
DefaultCompressionAlgo CompressionAlgo
// CompressionConfig configures the compression settings.
CompressionConfig *CompressionConfig
// S2KCount is only used for symmetric encryption. It
// determines the strength of the passphrase stretching when
// the said passphrase is hashed to produce a key. S2KCount
// should be between 1024 and 65011712, inclusive. If Config
// is nil or S2KCount is 0, the value 65536 is used. Not all
// values in the above range can be represented. S2KCount will
// be rounded up to the next representable value if it cannot
// be encoded exactly. When set, it is strongly encouraged to
// use a value that is at least 65536. See RFC 4880 Section
// 3.7.1.3.
S2KCount int
// RSABits is the number of bits in new RSA keys made with NewEntity.
// If zero, then 2048 bit keys are created.
RSABits int
}
func (c *Config) Random() io.Reader {
if c == nil || c.Rand == nil {
return rand.Reader
}
return c.Rand
}
func (c *Config) Hash() crypto.Hash {
if c == nil || uint(c.DefaultHash) == 0 {
return crypto.SHA256
}
return c.DefaultHash
}
func (c *Config) Cipher() CipherFunction {
if c == nil || uint8(c.DefaultCipher) == 0 {
return CipherAES128
}
return c.DefaultCipher
}
func (c *Config) Now() time.Time {
if c == nil || c.Time == nil {
return time.Now()
}
return c.Time()
}
func (c *Config) Compression() CompressionAlgo {
if c == nil {
return CompressionNone
}
return c.DefaultCompressionAlgo
}
func (c *Config) PasswordHashIterations() int {
if c == nil || c.S2KCount == 0 {
return 0
}
return c.S2KCount
}
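
Editor's note: a tiny sketch showing the nil-Config defaults documented above (illustrative only).

package main

import (
	"crypto"
	"fmt"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	var cfg *packet.Config // a nil *Config is valid; every accessor falls back to a default
	fmt.Println(cfg.Hash() == crypto.SHA256)                 // true
	fmt.Println(cfg.Cipher() == packet.CipherAES128)         // true
	fmt.Println(cfg.Compression() == packet.CompressionNone) // true
	fmt.Println(cfg.PasswordHashIterations())                // 0: per the comment above, callers fall back to 65536
}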

206
vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go generated vendored Normal file

@ -0,0 +1,206 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"crypto/rsa"
"encoding/binary"
"io"
"math/big"
"strconv"
"golang.org/x/crypto/openpgp/elgamal"
"golang.org/x/crypto/openpgp/errors"
)
const encryptedKeyVersion = 3
// EncryptedKey represents a public-key encrypted session key. See RFC 4880,
// section 5.1.
type EncryptedKey struct {
KeyId uint64
Algo PublicKeyAlgorithm
CipherFunc CipherFunction // only valid after a successful Decrypt
Key []byte // only valid after a successful Decrypt
encryptedMPI1, encryptedMPI2 parsedMPI
}
func (e *EncryptedKey) parse(r io.Reader) (err error) {
var buf [10]byte
_, err = readFull(r, buf[:])
if err != nil {
return
}
if buf[0] != encryptedKeyVersion {
return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0])))
}
e.KeyId = binary.BigEndian.Uint64(buf[1:9])
e.Algo = PublicKeyAlgorithm(buf[9])
switch e.Algo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
if err != nil {
return
}
case PubKeyAlgoElGamal:
e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
if err != nil {
return
}
e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r)
if err != nil {
return
}
}
_, err = consumeAll(r)
return
}
func checksumKeyMaterial(key []byte) uint16 {
var checksum uint16
for _, v := range key {
checksum += uint16(v)
}
return checksum
}
// Decrypt decrypts an encrypted session key with the given private key. The
// private key must have been decrypted first.
// If config is nil, sensible defaults will be used.
func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
var err error
var b []byte
// TODO(agl): use session key decryption routines here to avoid
// padding oracle attacks.
switch priv.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
k := priv.PrivateKey.(*rsa.PrivateKey)
b, err = rsa.DecryptPKCS1v15(config.Random(), k, padToKeySize(&k.PublicKey, e.encryptedMPI1.bytes))
case PubKeyAlgoElGamal:
c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes)
c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes)
b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2)
default:
err = errors.InvalidArgumentError("cannot decrypted encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
}
if err != nil {
return err
}
e.CipherFunc = CipherFunction(b[0])
e.Key = b[1 : len(b)-2]
expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1])
checksum := checksumKeyMaterial(e.Key)
if checksum != expectedChecksum {
return errors.StructuralError("EncryptedKey checksum incorrect")
}
return nil
}
// Serialize writes the encrypted key packet, e, to w.
func (e *EncryptedKey) Serialize(w io.Writer) error {
var mpiLen int
switch e.Algo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
mpiLen = 2 + len(e.encryptedMPI1.bytes)
case PubKeyAlgoElGamal:
mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes)
default:
return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo)))
}
serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen)
w.Write([]byte{encryptedKeyVersion})
binary.Write(w, binary.BigEndian, e.KeyId)
w.Write([]byte{byte(e.Algo)})
switch e.Algo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
writeMPIs(w, e.encryptedMPI1)
case PubKeyAlgoElGamal:
writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2)
default:
panic("internal error")
}
return nil
}
// SerializeEncryptedKey serializes an encrypted key packet to w that contains
// key, encrypted to pub.
// If config is nil, sensible defaults will be used.
func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error {
var buf [10]byte
buf[0] = encryptedKeyVersion
binary.BigEndian.PutUint64(buf[1:9], pub.KeyId)
buf[9] = byte(pub.PubKeyAlgo)
keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */)
keyBlock[0] = byte(cipherFunc)
copy(keyBlock[1:], key)
checksum := checksumKeyMaterial(key)
keyBlock[1+len(key)] = byte(checksum >> 8)
keyBlock[1+len(key)+1] = byte(checksum)
switch pub.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock)
case PubKeyAlgoElGamal:
return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock)
case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly:
return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
}
return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
}
func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error {
cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock)
if err != nil {
return errors.InvalidArgumentError("RSA encryption failed: " + err.Error())
}
packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText)
err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
if err != nil {
return err
}
_, err = w.Write(header[:])
if err != nil {
return err
}
return writeMPI(w, 8*uint16(len(cipherText)), cipherText)
}
func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error {
c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock)
if err != nil {
return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error())
}
packetLen := 10 /* header length */
packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8
packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8
err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
if err != nil {
return err
}
_, err = w.Write(header[:])
if err != nil {
return err
}
err = writeBig(w, c1)
if err != nil {
return err
}
return writeBig(w, c2)
}
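
Editor's note: a hedged sketch of the session-key round trip — SerializeEncryptedKey to a public key, Read the packet back, then Decrypt with the matching private key. The 1024-bit RSA key and the 16-byte session key are toy-sized assumptions to keep the example fast.

package main

import (
	"bytes"
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	// Toy-sized RSA key for speed; real keys should be larger.
	rsaPriv, err := rsa.GenerateKey(rand.Reader, 1024)
	if err != nil {
		panic(err)
	}
	now := time.Now()
	pub := packet.NewRSAPublicKey(now, &rsaPriv.PublicKey)
	priv := packet.NewRSAPrivateKey(now, rsaPriv)

	sessionKey := make([]byte, 16) // e.g. an AES-128 session key
	if _, err := rand.Read(sessionKey); err != nil {
		panic(err)
	}

	var buf bytes.Buffer
	if err := packet.SerializeEncryptedKey(&buf, pub, packet.CipherAES128, sessionKey, nil); err != nil {
		panic(err)
	}

	p, err := packet.Read(&buf)
	if err != nil {
		panic(err)
	}
	ek := p.(*packet.EncryptedKey)
	if err := ek.Decrypt(priv, nil); err != nil {
		panic(err)
	}
	fmt.Println(ek.CipherFunc == packet.CipherAES128, bytes.Equal(ek.Key, sessionKey)) // true true
}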

89
vendor/golang.org/x/crypto/openpgp/packet/literal.go generated vendored Normal file

@ -0,0 +1,89 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"encoding/binary"
"io"
)
// LiteralData represents an encrypted file. See RFC 4880, section 5.9.
type LiteralData struct {
IsBinary bool
FileName string
Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined.
Body io.Reader
}
// ForEyesOnly returns whether the contents of the LiteralData have been marked
// as especially sensitive.
func (l *LiteralData) ForEyesOnly() bool {
return l.FileName == "_CONSOLE"
}
func (l *LiteralData) parse(r io.Reader) (err error) {
var buf [256]byte
_, err = readFull(r, buf[:2])
if err != nil {
return
}
l.IsBinary = buf[0] == 'b'
fileNameLen := int(buf[1])
_, err = readFull(r, buf[:fileNameLen])
if err != nil {
return
}
l.FileName = string(buf[:fileNameLen])
_, err = readFull(r, buf[:4])
if err != nil {
return
}
l.Time = binary.BigEndian.Uint32(buf[:4])
l.Body = r
return
}
// SerializeLiteral serializes a literal data packet to w and returns a
// WriteCloser to which the data itself can be written and which MUST be closed
// on completion. The fileName is truncated to 255 bytes.
func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) {
var buf [4]byte
buf[0] = 't'
if isBinary {
buf[0] = 'b'
}
if len(fileName) > 255 {
fileName = fileName[:255]
}
buf[1] = byte(len(fileName))
inner, err := serializeStreamHeader(w, packetTypeLiteralData)
if err != nil {
return
}
_, err = inner.Write(buf[:2])
if err != nil {
return
}
_, err = inner.Write([]byte(fileName))
if err != nil {
return
}
binary.BigEndian.PutUint32(buf[:], time)
_, err = inner.Write(buf[:])
if err != nil {
return
}
plaintext = inner
return
}
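
Editor's note: the same round-trip pattern for literal data packets (sketch; the nopCloser adapter, file name, and body text are illustrative).

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"

	"golang.org/x/crypto/openpgp/packet"
)

// nopCloser adapts a bytes.Buffer to the io.WriteCloser SerializeLiteral expects.
type nopCloser struct{ io.Writer }

func (nopCloser) Close() error { return nil }

func main() {
	var buf bytes.Buffer
	w, err := packet.SerializeLiteral(nopCloser{&buf}, true, "hello.txt", 0)
	if err != nil {
		panic(err)
	}
	io.WriteString(w, "literal packet body")
	w.Close() // MUST be closed to terminate the partial-length stream

	p, err := packet.Read(&buf)
	if err != nil {
		panic(err)
	}
	lit := p.(*packet.LiteralData)
	body, _ := ioutil.ReadAll(lit.Body)
	fmt.Println(lit.FileName, lit.IsBinary, string(body)) // hello.txt true literal packet body
}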

143
vendor/golang.org/x/crypto/openpgp/packet/ocfb.go generated vendored Normal file

@ -0,0 +1,143 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9
package packet
import (
"crypto/cipher"
)
type ocfbEncrypter struct {
b cipher.Block
fre []byte
outUsed int
}
// An OCFBResyncOption determines if the "resynchronization step" of OCFB is
// performed.
type OCFBResyncOption bool
const (
OCFBResync OCFBResyncOption = true
OCFBNoResync OCFBResyncOption = false
)
// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's
// cipher feedback mode using the given cipher.Block, and an initial amount of
// ciphertext. randData must be random bytes and be the same length as the
// cipher.Block's block size. Resync determines if the "resynchronization step"
// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on
// this point.
func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) {
blockSize := block.BlockSize()
if len(randData) != blockSize {
return nil, nil
}
x := &ocfbEncrypter{
b: block,
fre: make([]byte, blockSize),
outUsed: 0,
}
prefix := make([]byte, blockSize+2)
block.Encrypt(x.fre, x.fre)
for i := 0; i < blockSize; i++ {
prefix[i] = randData[i] ^ x.fre[i]
}
block.Encrypt(x.fre, prefix[:blockSize])
prefix[blockSize] = x.fre[0] ^ randData[blockSize-2]
prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1]
if resync {
block.Encrypt(x.fre, prefix[2:])
} else {
x.fre[0] = prefix[blockSize]
x.fre[1] = prefix[blockSize+1]
x.outUsed = 2
}
return x, prefix
}
func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) {
for i := 0; i < len(src); i++ {
if x.outUsed == len(x.fre) {
x.b.Encrypt(x.fre, x.fre)
x.outUsed = 0
}
x.fre[x.outUsed] ^= src[i]
dst[i] = x.fre[x.outUsed]
x.outUsed++
}
}
type ocfbDecrypter struct {
b cipher.Block
fre []byte
outUsed int
}
// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's
// cipher feedback mode using the given cipher.Block. Prefix must be the first
// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's
// block size. If an incorrect key is detected then nil is returned. On
// successful exit, blockSize+2 bytes of decrypted data are written into
// prefix. Resync determines if the "resynchronization step" from RFC 4880,
// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point.
func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream {
blockSize := block.BlockSize()
if len(prefix) != blockSize+2 {
return nil
}
x := &ocfbDecrypter{
b: block,
fre: make([]byte, blockSize),
outUsed: 0,
}
prefixCopy := make([]byte, len(prefix))
copy(prefixCopy, prefix)
block.Encrypt(x.fre, x.fre)
for i := 0; i < blockSize; i++ {
prefixCopy[i] ^= x.fre[i]
}
block.Encrypt(x.fre, prefix[:blockSize])
prefixCopy[blockSize] ^= x.fre[0]
prefixCopy[blockSize+1] ^= x.fre[1]
if prefixCopy[blockSize-2] != prefixCopy[blockSize] ||
prefixCopy[blockSize-1] != prefixCopy[blockSize+1] {
return nil
}
if resync {
block.Encrypt(x.fre, prefix[2:])
} else {
x.fre[0] = prefix[blockSize]
x.fre[1] = prefix[blockSize+1]
x.outUsed = 2
}
copy(prefix, prefixCopy)
return x
}
func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) {
for i := 0; i < len(src); i++ {
if x.outUsed == len(x.fre) {
x.b.Encrypt(x.fre, x.fre)
x.outUsed = 0
}
c := src[i]
dst[i] = x.fre[x.outUsed] ^ src[i]
x.fre[x.outUsed] = c
x.outUsed++
}
}
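
Editor's note: a sketch of an OCFB encrypt/decrypt round trip with AES-128; the key, random prefix bytes, and plaintext string are illustrative.

package main

import (
	"crypto/aes"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	key := make([]byte, 16)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}

	// The encrypter needs blockSize random bytes and returns the stream plus
	// the blockSize+2 bytes of ciphertext prefix that must precede the data.
	randData := make([]byte, block.BlockSize())
	if _, err := rand.Read(randData); err != nil {
		panic(err)
	}
	enc, prefix := packet.NewOCFBEncrypter(block, randData, packet.OCFBResync)

	plaintext := []byte("OpenPGP CFB mode round trip")
	ciphertext := make([]byte, len(plaintext))
	enc.XORKeyStream(ciphertext, plaintext)

	// The decrypter consumes the prefix and returns nil if its quick check fails.
	dec := packet.NewOCFBDecrypter(block, prefix, packet.OCFBResync)
	if dec == nil {
		panic("incorrect key detected")
	}
	recovered := make([]byte, len(ciphertext))
	dec.XORKeyStream(recovered, ciphertext)
	fmt.Println(string(recovered))
}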

73
vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go generated vendored Normal file

@ -0,0 +1,73 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"crypto"
"encoding/binary"
"golang.org/x/crypto/openpgp/errors"
"golang.org/x/crypto/openpgp/s2k"
"io"
"strconv"
)
// OnePassSignature represents a one-pass signature packet. See RFC 4880,
// section 5.4.
type OnePassSignature struct {
SigType SignatureType
Hash crypto.Hash
PubKeyAlgo PublicKeyAlgorithm
KeyId uint64
IsLast bool
}
const onePassSignatureVersion = 3
func (ops *OnePassSignature) parse(r io.Reader) (err error) {
var buf [13]byte
_, err = readFull(r, buf[:])
if err != nil {
return
}
if buf[0] != onePassSignatureVersion {
err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0])))
}
var ok bool
ops.Hash, ok = s2k.HashIdToHash(buf[2])
if !ok {
return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2])))
}
ops.SigType = SignatureType(buf[1])
ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3])
ops.KeyId = binary.BigEndian.Uint64(buf[4:12])
ops.IsLast = buf[12] != 0
return
}
// Serialize marshals the given OnePassSignature to w.
func (ops *OnePassSignature) Serialize(w io.Writer) error {
var buf [13]byte
buf[0] = onePassSignatureVersion
buf[1] = uint8(ops.SigType)
var ok bool
buf[2], ok = s2k.HashToHashId(ops.Hash)
if !ok {
return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash)))
}
buf[3] = uint8(ops.PubKeyAlgo)
binary.BigEndian.PutUint64(buf[4:12], ops.KeyId)
if ops.IsLast {
buf[12] = 1
}
if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil {
return err
}
_, err := w.Write(buf[:])
return err
}
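
Editor's note: a small serialize/parse sketch for one-pass signature packets. The key id and field values are arbitrary, and the blank crypto/sha256 import is an assumption so the hash-id mapping resolves cleanly.

package main

import (
	"bytes"
	"crypto"
	_ "crypto/sha256" // register SHA-256 for the hash-id mapping
	"fmt"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	ops := &packet.OnePassSignature{
		SigType:    packet.SigTypeBinary,
		Hash:       crypto.SHA256,
		PubKeyAlgo: packet.PubKeyAlgoRSA,
		KeyId:      0x1122334455667788,
		IsLast:     true,
	}
	var buf bytes.Buffer
	if err := ops.Serialize(&buf); err != nil {
		panic(err)
	}

	p, err := packet.Read(&buf)
	if err != nil {
		panic(err)
	}
	got := p.(*packet.OnePassSignature)
	fmt.Printf("%x %v %v\n", got.KeyId, got.Hash == crypto.SHA256, got.IsLast) // 1122334455667788 true true
}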

162
vendor/golang.org/x/crypto/openpgp/packet/opaque.go generated vendored Normal file

@ -0,0 +1,162 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"bytes"
"io"
"io/ioutil"
"golang.org/x/crypto/openpgp/errors"
)
// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is
// useful for splitting and storing the original packet contents separately,
// handling unsupported packet types or accessing parts of the packet not yet
// implemented by this package.
type OpaquePacket struct {
// Packet type
Tag uint8
// Reason why the packet was parsed opaquely
Reason error
// Binary contents of the packet data
Contents []byte
}
func (op *OpaquePacket) parse(r io.Reader) (err error) {
op.Contents, err = ioutil.ReadAll(r)
return
}
// Serialize marshals the packet to a writer in its original form, including
// the packet header.
func (op *OpaquePacket) Serialize(w io.Writer) (err error) {
err = serializeHeader(w, packetType(op.Tag), len(op.Contents))
if err == nil {
_, err = w.Write(op.Contents)
}
return
}
// Parse attempts to parse the opaque contents into a structure supported by
// this package. If the packet is not known then the result will be another
// OpaquePacket.
func (op *OpaquePacket) Parse() (p Packet, err error) {
hdr := bytes.NewBuffer(nil)
err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents))
if err != nil {
op.Reason = err
return op, err
}
p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents)))
if err != nil {
op.Reason = err
p = op
}
return
}
// OpaqueReader reads OpaquePackets from an io.Reader.
type OpaqueReader struct {
r io.Reader
}
func NewOpaqueReader(r io.Reader) *OpaqueReader {
return &OpaqueReader{r: r}
}
// Read the next OpaquePacket.
func (or *OpaqueReader) Next() (op *OpaquePacket, err error) {
tag, _, contents, err := readHeader(or.r)
if err != nil {
return
}
op = &OpaquePacket{Tag: uint8(tag), Reason: err}
err = op.parse(contents)
if err != nil {
consumeAll(contents)
}
return
}
// OpaqueSubpacket represents an unparsed OpenPGP subpacket,
// as found in signature and user attribute packets.
type OpaqueSubpacket struct {
SubType uint8
Contents []byte
}
// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from
// their byte representation.
func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) {
var (
subHeaderLen int
subPacket *OpaqueSubpacket
)
for len(contents) > 0 {
subHeaderLen, subPacket, err = nextSubpacket(contents)
if err != nil {
break
}
result = append(result, subPacket)
contents = contents[subHeaderLen+len(subPacket.Contents):]
}
return
}
func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) {
// RFC 4880, section 5.2.3.1
var subLen uint32
if len(contents) < 1 {
goto Truncated
}
subPacket = &OpaqueSubpacket{}
switch {
case contents[0] < 192:
subHeaderLen = 2 // 1 length byte, 1 subtype byte
if len(contents) < subHeaderLen {
goto Truncated
}
subLen = uint32(contents[0])
contents = contents[1:]
case contents[0] < 255:
subHeaderLen = 3 // 2 length bytes, 1 subtype
if len(contents) < subHeaderLen {
goto Truncated
}
subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192
contents = contents[2:]
default:
subHeaderLen = 6 // 5 length bytes, 1 subtype
if len(contents) < subHeaderLen {
goto Truncated
}
subLen = uint32(contents[1])<<24 |
uint32(contents[2])<<16 |
uint32(contents[3])<<8 |
uint32(contents[4])
contents = contents[5:]
}
if subLen > uint32(len(contents)) || subLen == 0 {
goto Truncated
}
subPacket.SubType = contents[0]
subPacket.Contents = contents[1:subLen]
return
Truncated:
err = errors.StructuralError("subpacket truncated")
return
}
func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) {
buf := make([]byte, 6)
n := serializeSubpacketLength(buf, len(osp.Contents)+1)
buf[n] = osp.SubType
if _, err = w.Write(buf[:n+1]); err != nil {
return
}
_, err = w.Write(osp.Contents)
return
}
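
Editor's note: a sketch of reading a packet opaquely and then promoting it with Parse. The user id values are placeholders, and NewUserId/Serialize come from the package's user-id support elsewhere in this vendor drop.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	// Serialize one simple packet, then re-read it without interpreting it.
	var buf bytes.Buffer
	uid := packet.NewUserId("Alice Example", "", "alice@example.com")
	if err := uid.Serialize(&buf); err != nil {
		panic(err)
	}

	or := packet.NewOpaqueReader(&buf)
	op, err := or.Next()
	if err != nil {
		panic(err)
	}
	fmt.Printf("tag=%d, %d content bytes\n", op.Tag, len(op.Contents))

	// Parse promotes the opaque packet to a concrete type when it can.
	p, err := op.Parse()
	if err != nil {
		panic(err)
	}
	fmt.Println(p.(*packet.UserId).Id) // Alice Example <alice@example.com>
}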

549
vendor/golang.org/x/crypto/openpgp/packet/packet.go generated vendored Normal file

@ -0,0 +1,549 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package packet implements parsing and serialization of OpenPGP packets, as
// specified in RFC 4880.
package packet // import "golang.org/x/crypto/openpgp/packet"
import (
"bufio"
"crypto/aes"
"crypto/cipher"
"crypto/des"
"crypto/rsa"
"io"
"math/big"
"golang.org/x/crypto/cast5"
"golang.org/x/crypto/openpgp/errors"
)
// readFull is the same as io.ReadFull except that reading zero bytes returns
// ErrUnexpectedEOF rather than EOF.
func readFull(r io.Reader, buf []byte) (n int, err error) {
n, err = io.ReadFull(r, buf)
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return
}
// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2.
func readLength(r io.Reader) (length int64, isPartial bool, err error) {
var buf [4]byte
_, err = readFull(r, buf[:1])
if err != nil {
return
}
switch {
case buf[0] < 192:
length = int64(buf[0])
case buf[0] < 224:
length = int64(buf[0]-192) << 8
_, err = readFull(r, buf[0:1])
if err != nil {
return
}
length += int64(buf[0]) + 192
case buf[0] < 255:
length = int64(1) << (buf[0] & 0x1f)
isPartial = true
default:
_, err = readFull(r, buf[0:4])
if err != nil {
return
}
length = int64(buf[0])<<24 |
int64(buf[1])<<16 |
int64(buf[2])<<8 |
int64(buf[3])
}
return
}
// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths.
// The continuation lengths are parsed and removed from the stream and EOF is
// returned at the end of the packet. See RFC 4880, section 4.2.2.4.
type partialLengthReader struct {
r io.Reader
remaining int64
isPartial bool
}
func (r *partialLengthReader) Read(p []byte) (n int, err error) {
for r.remaining == 0 {
if !r.isPartial {
return 0, io.EOF
}
r.remaining, r.isPartial, err = readLength(r.r)
if err != nil {
return 0, err
}
}
toRead := int64(len(p))
if toRead > r.remaining {
toRead = r.remaining
}
n, err = r.r.Read(p[:int(toRead)])
r.remaining -= int64(n)
if n < int(toRead) && err == io.EOF {
err = io.ErrUnexpectedEOF
}
return
}
// partialLengthWriter writes a stream of data using OpenPGP partial lengths.
// See RFC 4880, section 4.2.2.4.
type partialLengthWriter struct {
w io.WriteCloser
lengthByte [1]byte
}
func (w *partialLengthWriter) Write(p []byte) (n int, err error) {
for len(p) > 0 {
for power := uint(14); power < 32; power-- {
l := 1 << power
if len(p) >= l {
w.lengthByte[0] = 224 + uint8(power)
_, err = w.w.Write(w.lengthByte[:])
if err != nil {
return
}
var m int
m, err = w.w.Write(p[:l])
n += m
if err != nil {
return
}
p = p[l:]
break
}
}
}
return
}
func (w *partialLengthWriter) Close() error {
w.lengthByte[0] = 0
_, err := w.w.Write(w.lengthByte[:])
if err != nil {
return err
}
return w.w.Close()
}
// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the
// underlying Reader returns EOF before the limit has been reached.
type spanReader struct {
r io.Reader
n int64
}
func (l *spanReader) Read(p []byte) (n int, err error) {
if l.n <= 0 {
return 0, io.EOF
}
if int64(len(p)) > l.n {
p = p[0:l.n]
}
n, err = l.r.Read(p)
l.n -= int64(n)
if l.n > 0 && err == io.EOF {
err = io.ErrUnexpectedEOF
}
return
}
// readHeader parses a packet header and returns an io.Reader which will return
// the contents of the packet. See RFC 4880, section 4.2.
func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) {
var buf [4]byte
_, err = io.ReadFull(r, buf[:1])
if err != nil {
return
}
if buf[0]&0x80 == 0 {
err = errors.StructuralError("tag byte does not have MSB set")
return
}
if buf[0]&0x40 == 0 {
// Old format packet
tag = packetType((buf[0] & 0x3f) >> 2)
lengthType := buf[0] & 3
if lengthType == 3 {
length = -1
contents = r
return
}
lengthBytes := 1 << lengthType
_, err = readFull(r, buf[0:lengthBytes])
if err != nil {
return
}
for i := 0; i < lengthBytes; i++ {
length <<= 8
length |= int64(buf[i])
}
contents = &spanReader{r, length}
return
}
// New format packet
tag = packetType(buf[0] & 0x3f)
length, isPartial, err := readLength(r)
if err != nil {
return
}
if isPartial {
contents = &partialLengthReader{
remaining: length,
isPartial: true,
r: r,
}
length = -1
} else {
contents = &spanReader{r, length}
}
return
}
// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section
// 4.2.
func serializeHeader(w io.Writer, ptype packetType, length int) (err error) {
var buf [6]byte
var n int
buf[0] = 0x80 | 0x40 | byte(ptype)
if length < 192 {
buf[1] = byte(length)
n = 2
} else if length < 8384 {
length -= 192
buf[1] = 192 + byte(length>>8)
buf[2] = byte(length)
n = 3
} else {
buf[1] = 255
buf[2] = byte(length >> 24)
buf[3] = byte(length >> 16)
buf[4] = byte(length >> 8)
buf[5] = byte(length)
n = 6
}
_, err = w.Write(buf[:n])
return
}
// serializeStreamHeader writes an OpenPGP packet header to w where the
// length of the packet is unknown. It returns a io.WriteCloser which can be
// used to write the contents of the packet. See RFC 4880, section 4.2.
func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) {
var buf [1]byte
buf[0] = 0x80 | 0x40 | byte(ptype)
_, err = w.Write(buf[:])
if err != nil {
return
}
out = &partialLengthWriter{w: w}
return
}
// Packet represents an OpenPGP packet. Users are expected to try casting
// instances of this interface to specific packet types.
type Packet interface {
parse(io.Reader) error
}
// consumeAll reads from the given Reader until error, returning the number of
// bytes read.
func consumeAll(r io.Reader) (n int64, err error) {
var m int
var buf [1024]byte
for {
m, err = r.Read(buf[:])
n += int64(m)
if err == io.EOF {
err = nil
return
}
if err != nil {
return
}
}
}
// packetType represents the numeric ids of the different OpenPGP packet types. See
// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2
type packetType uint8
const (
packetTypeEncryptedKey packetType = 1
packetTypeSignature packetType = 2
packetTypeSymmetricKeyEncrypted packetType = 3
packetTypeOnePassSignature packetType = 4
packetTypePrivateKey packetType = 5
packetTypePublicKey packetType = 6
packetTypePrivateSubkey packetType = 7
packetTypeCompressed packetType = 8
packetTypeSymmetricallyEncrypted packetType = 9
packetTypeLiteralData packetType = 11
packetTypeUserId packetType = 13
packetTypePublicSubkey packetType = 14
packetTypeUserAttribute packetType = 17
packetTypeSymmetricallyEncryptedMDC packetType = 18
)
// peekVersion detects the version of a public key packet about to
// be read. A bufio.Reader at the original position of the io.Reader
// is returned.
func peekVersion(r io.Reader) (bufr *bufio.Reader, ver byte, err error) {
bufr = bufio.NewReader(r)
var verBuf []byte
if verBuf, err = bufr.Peek(1); err != nil {
return
}
ver = verBuf[0]
return
}
// Read reads a single OpenPGP packet from the given io.Reader. If there is an
// error parsing a packet, the whole packet is consumed from the input.
func Read(r io.Reader) (p Packet, err error) {
tag, _, contents, err := readHeader(r)
if err != nil {
return
}
switch tag {
case packetTypeEncryptedKey:
p = new(EncryptedKey)
case packetTypeSignature:
var version byte
// Detect signature version
if contents, version, err = peekVersion(contents); err != nil {
return
}
if version < 4 {
p = new(SignatureV3)
} else {
p = new(Signature)
}
case packetTypeSymmetricKeyEncrypted:
p = new(SymmetricKeyEncrypted)
case packetTypeOnePassSignature:
p = new(OnePassSignature)
case packetTypePrivateKey, packetTypePrivateSubkey:
pk := new(PrivateKey)
if tag == packetTypePrivateSubkey {
pk.IsSubkey = true
}
p = pk
case packetTypePublicKey, packetTypePublicSubkey:
var version byte
if contents, version, err = peekVersion(contents); err != nil {
return
}
isSubkey := tag == packetTypePublicSubkey
if version < 4 {
p = &PublicKeyV3{IsSubkey: isSubkey}
} else {
p = &PublicKey{IsSubkey: isSubkey}
}
case packetTypeCompressed:
p = new(Compressed)
case packetTypeSymmetricallyEncrypted:
p = new(SymmetricallyEncrypted)
case packetTypeLiteralData:
p = new(LiteralData)
case packetTypeUserId:
p = new(UserId)
case packetTypeUserAttribute:
p = new(UserAttribute)
case packetTypeSymmetricallyEncryptedMDC:
se := new(SymmetricallyEncrypted)
se.MDC = true
p = se
default:
err = errors.UnknownPacketTypeError(tag)
}
if p != nil {
err = p.parse(contents)
}
if err != nil {
consumeAll(contents)
}
return
}
// SignatureType represents the different semantic meanings of an OpenPGP
// signature. See RFC 4880, section 5.2.1.
type SignatureType uint8
const (
SigTypeBinary SignatureType = 0
SigTypeText = 1
SigTypeGenericCert = 0x10
SigTypePersonaCert = 0x11
SigTypeCasualCert = 0x12
SigTypePositiveCert = 0x13
SigTypeSubkeyBinding = 0x18
SigTypePrimaryKeyBinding = 0x19
SigTypeDirectSignature = 0x1F
SigTypeKeyRevocation = 0x20
SigTypeSubkeyRevocation = 0x28
)
// PublicKeyAlgorithm represents the different public key system specified for
// OpenPGP. See
// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12
type PublicKeyAlgorithm uint8
const (
PubKeyAlgoRSA PublicKeyAlgorithm = 1
PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2
PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3
PubKeyAlgoElGamal PublicKeyAlgorithm = 16
PubKeyAlgoDSA PublicKeyAlgorithm = 17
// RFC 6637, Section 5.
PubKeyAlgoECDH PublicKeyAlgorithm = 18
PubKeyAlgoECDSA PublicKeyAlgorithm = 19
)
// CanEncrypt returns true if it's possible to encrypt a message to a public
// key of the given type.
func (pka PublicKeyAlgorithm) CanEncrypt() bool {
switch pka {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal:
return true
}
return false
}
// CanSign returns true if it's possible for a public key of the given type to
// sign a message.
func (pka PublicKeyAlgorithm) CanSign() bool {
switch pka {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA:
return true
}
return false
}
// CipherFunction represents the different block ciphers specified for OpenPGP. See
// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13
type CipherFunction uint8
const (
Cipher3DES CipherFunction = 2
CipherCAST5 CipherFunction = 3
CipherAES128 CipherFunction = 7
CipherAES192 CipherFunction = 8
CipherAES256 CipherFunction = 9
)
// KeySize returns the key size, in bytes, of cipher.
func (cipher CipherFunction) KeySize() int {
switch cipher {
case Cipher3DES:
return 24
case CipherCAST5:
return cast5.KeySize
case CipherAES128:
return 16
case CipherAES192:
return 24
case CipherAES256:
return 32
}
return 0
}
// blockSize returns the block size, in bytes, of cipher.
func (cipher CipherFunction) blockSize() int {
switch cipher {
case Cipher3DES:
return des.BlockSize
case CipherCAST5:
return 8
case CipherAES128, CipherAES192, CipherAES256:
return 16
}
return 0
}
// new returns a fresh instance of the given cipher.
func (cipher CipherFunction) new(key []byte) (block cipher.Block) {
switch cipher {
case Cipher3DES:
block, _ = des.NewTripleDESCipher(key)
case CipherCAST5:
block, _ = cast5.NewCipher(key)
case CipherAES128, CipherAES192, CipherAES256:
block, _ = aes.NewCipher(key)
}
return
}
// readMPI reads a big integer from r. The bit length returned is the bit
// length that was specified in r. This is preserved so that the integer can be
// reserialized exactly.
func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) {
var buf [2]byte
_, err = readFull(r, buf[0:])
if err != nil {
return
}
bitLength = uint16(buf[0])<<8 | uint16(buf[1])
numBytes := (int(bitLength) + 7) / 8
mpi = make([]byte, numBytes)
_, err = readFull(r, mpi)
// According to RFC 4880 3.2. we should check that the MPI has no leading
// zeroes (at least when not an encrypted MPI?), but this implementation
// does generate leading zeroes, so we keep accepting them.
return
}
// writeMPI serializes a big integer to w.
func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) {
// Note that we can produce leading zeroes, in violation of RFC 4880 3.2.
// Implementations seem to be tolerant of them, and stripping them would
// make it complex to guarantee matching re-serialization.
_, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)})
if err == nil {
_, err = w.Write(mpiBytes)
}
return
}
// writeBig serializes a *big.Int to w.
func writeBig(w io.Writer, i *big.Int) error {
return writeMPI(w, uint16(i.BitLen()), i.Bytes())
}
// padToKeySize left-pads an MPI with zeroes to match the length of the
// specified RSA public key.
func padToKeySize(pub *rsa.PublicKey, b []byte) []byte {
k := (pub.N.BitLen() + 7) / 8
if len(b) >= k {
return b
}
bb := make([]byte, k)
copy(bb[len(bb)-len(b):], b)
return bb
}
// CompressionAlgo represents the different compression algorithms
// supported by OpenPGP (except for BZIP2, which is not currently
// supported). See Section 9.3 of RFC 4880.
type CompressionAlgo uint8
const (
CompressionNone CompressionAlgo = 0
CompressionZIP CompressionAlgo = 1
CompressionZLIB CompressionAlgo = 2
)
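
Editor's note: a quick sketch exercising the exported helpers defined above; the printed values follow directly from the switch statements in this file.

package main

import (
	"fmt"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	fmt.Println(packet.PubKeyAlgoRSA.CanEncrypt(), packet.PubKeyAlgoRSA.CanSign())         // true true
	fmt.Println(packet.PubKeyAlgoElGamal.CanEncrypt(), packet.PubKeyAlgoElGamal.CanSign()) // true false
	fmt.Println(packet.PubKeyAlgoDSA.CanEncrypt(), packet.PubKeyAlgoDSA.CanSign())         // false true
	fmt.Println(packet.CipherAES128.KeySize(), packet.CipherAES256.KeySize())              // 16 32
}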

380
vendor/golang.org/x/crypto/openpgp/packet/private_key.go generated vendored Normal file

@ -0,0 +1,380 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"bytes"
"crypto"
"crypto/cipher"
"crypto/dsa"
"crypto/ecdsa"
"crypto/rsa"
"crypto/sha1"
"io"
"io/ioutil"
"math/big"
"strconv"
"time"
"golang.org/x/crypto/openpgp/elgamal"
"golang.org/x/crypto/openpgp/errors"
"golang.org/x/crypto/openpgp/s2k"
)
// PrivateKey represents a possibly encrypted private key. See RFC 4880,
// section 5.5.3.
type PrivateKey struct {
PublicKey
Encrypted bool // if true then the private key is unavailable until Decrypt has been called.
encryptedData []byte
cipher CipherFunction
s2k func(out, in []byte)
PrivateKey interface{} // An *{rsa|dsa|ecdsa}.PrivateKey or a crypto.Signer.
sha1Checksum bool
iv []byte
}
func NewRSAPrivateKey(currentTime time.Time, priv *rsa.PrivateKey) *PrivateKey {
pk := new(PrivateKey)
pk.PublicKey = *NewRSAPublicKey(currentTime, &priv.PublicKey)
pk.PrivateKey = priv
return pk
}
func NewDSAPrivateKey(currentTime time.Time, priv *dsa.PrivateKey) *PrivateKey {
pk := new(PrivateKey)
pk.PublicKey = *NewDSAPublicKey(currentTime, &priv.PublicKey)
pk.PrivateKey = priv
return pk
}
func NewElGamalPrivateKey(currentTime time.Time, priv *elgamal.PrivateKey) *PrivateKey {
pk := new(PrivateKey)
pk.PublicKey = *NewElGamalPublicKey(currentTime, &priv.PublicKey)
pk.PrivateKey = priv
return pk
}
func NewECDSAPrivateKey(currentTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey {
pk := new(PrivateKey)
pk.PublicKey = *NewECDSAPublicKey(currentTime, &priv.PublicKey)
pk.PrivateKey = priv
return pk
}
// NewSignerPrivateKey creates a sign-only PrivateKey from a crypto.Signer that
// implements RSA or ECDSA.
func NewSignerPrivateKey(currentTime time.Time, signer crypto.Signer) *PrivateKey {
pk := new(PrivateKey)
switch pubkey := signer.Public().(type) {
case rsa.PublicKey:
pk.PublicKey = *NewRSAPublicKey(currentTime, &pubkey)
pk.PubKeyAlgo = PubKeyAlgoRSASignOnly
case ecdsa.PublicKey:
pk.PublicKey = *NewECDSAPublicKey(currentTime, &pubkey)
default:
panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey")
}
pk.PrivateKey = signer
return pk
}
func (pk *PrivateKey) parse(r io.Reader) (err error) {
err = (&pk.PublicKey).parse(r)
if err != nil {
return
}
var buf [1]byte
_, err = readFull(r, buf[:])
if err != nil {
return
}
s2kType := buf[0]
switch s2kType {
case 0:
pk.s2k = nil
pk.Encrypted = false
case 254, 255:
_, err = readFull(r, buf[:])
if err != nil {
return
}
pk.cipher = CipherFunction(buf[0])
pk.Encrypted = true
pk.s2k, err = s2k.Parse(r)
if err != nil {
return
}
if s2kType == 254 {
pk.sha1Checksum = true
}
default:
return errors.UnsupportedError("deprecated s2k function in private key")
}
if pk.Encrypted {
blockSize := pk.cipher.blockSize()
if blockSize == 0 {
return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher)))
}
pk.iv = make([]byte, blockSize)
_, err = readFull(r, pk.iv)
if err != nil {
return
}
}
pk.encryptedData, err = ioutil.ReadAll(r)
if err != nil {
return
}
if !pk.Encrypted {
return pk.parsePrivateKey(pk.encryptedData)
}
return
}
func mod64kHash(d []byte) uint16 {
var h uint16
for _, b := range d {
h += uint16(b)
}
return h
}
func (pk *PrivateKey) Serialize(w io.Writer) (err error) {
// TODO(agl): support encrypted private keys
buf := bytes.NewBuffer(nil)
err = pk.PublicKey.serializeWithoutHeaders(buf)
if err != nil {
return
}
buf.WriteByte(0 /* no encryption */)
privateKeyBuf := bytes.NewBuffer(nil)
switch priv := pk.PrivateKey.(type) {
case *rsa.PrivateKey:
err = serializeRSAPrivateKey(privateKeyBuf, priv)
case *dsa.PrivateKey:
err = serializeDSAPrivateKey(privateKeyBuf, priv)
case *elgamal.PrivateKey:
err = serializeElGamalPrivateKey(privateKeyBuf, priv)
case *ecdsa.PrivateKey:
err = serializeECDSAPrivateKey(privateKeyBuf, priv)
default:
err = errors.InvalidArgumentError("unknown private key type")
}
if err != nil {
return
}
ptype := packetTypePrivateKey
contents := buf.Bytes()
privateKeyBytes := privateKeyBuf.Bytes()
if pk.IsSubkey {
ptype = packetTypePrivateSubkey
}
err = serializeHeader(w, ptype, len(contents)+len(privateKeyBytes)+2)
if err != nil {
return
}
_, err = w.Write(contents)
if err != nil {
return
}
_, err = w.Write(privateKeyBytes)
if err != nil {
return
}
checksum := mod64kHash(privateKeyBytes)
var checksumBytes [2]byte
checksumBytes[0] = byte(checksum >> 8)
checksumBytes[1] = byte(checksum)
_, err = w.Write(checksumBytes[:])
return
}
func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error {
err := writeBig(w, priv.D)
if err != nil {
return err
}
err = writeBig(w, priv.Primes[1])
if err != nil {
return err
}
err = writeBig(w, priv.Primes[0])
if err != nil {
return err
}
return writeBig(w, priv.Precomputed.Qinv)
}
func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error {
return writeBig(w, priv.X)
}
func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error {
return writeBig(w, priv.X)
}
func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error {
return writeBig(w, priv.D)
}
// Decrypt decrypts an encrypted private key using a passphrase.
func (pk *PrivateKey) Decrypt(passphrase []byte) error {
if !pk.Encrypted {
return nil
}
key := make([]byte, pk.cipher.KeySize())
pk.s2k(key, passphrase)
block := pk.cipher.new(key)
cfb := cipher.NewCFBDecrypter(block, pk.iv)
data := make([]byte, len(pk.encryptedData))
cfb.XORKeyStream(data, pk.encryptedData)
if pk.sha1Checksum {
if len(data) < sha1.Size {
return errors.StructuralError("truncated private key data")
}
h := sha1.New()
h.Write(data[:len(data)-sha1.Size])
sum := h.Sum(nil)
if !bytes.Equal(sum, data[len(data)-sha1.Size:]) {
return errors.StructuralError("private key checksum failure")
}
data = data[:len(data)-sha1.Size]
} else {
if len(data) < 2 {
return errors.StructuralError("truncated private key data")
}
var sum uint16
for i := 0; i < len(data)-2; i++ {
sum += uint16(data[i])
}
if data[len(data)-2] != uint8(sum>>8) ||
data[len(data)-1] != uint8(sum) {
return errors.StructuralError("private key checksum failure")
}
data = data[:len(data)-2]
}
return pk.parsePrivateKey(data)
}
func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) {
switch pk.PublicKey.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly:
return pk.parseRSAPrivateKey(data)
case PubKeyAlgoDSA:
return pk.parseDSAPrivateKey(data)
case PubKeyAlgoElGamal:
return pk.parseElGamalPrivateKey(data)
case PubKeyAlgoECDSA:
return pk.parseECDSAPrivateKey(data)
}
panic("impossible")
}
func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) {
rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey)
rsaPriv := new(rsa.PrivateKey)
rsaPriv.PublicKey = *rsaPub
buf := bytes.NewBuffer(data)
d, _, err := readMPI(buf)
if err != nil {
return
}
p, _, err := readMPI(buf)
if err != nil {
return
}
q, _, err := readMPI(buf)
if err != nil {
return
}
rsaPriv.D = new(big.Int).SetBytes(d)
rsaPriv.Primes = make([]*big.Int, 2)
rsaPriv.Primes[0] = new(big.Int).SetBytes(p)
rsaPriv.Primes[1] = new(big.Int).SetBytes(q)
if err := rsaPriv.Validate(); err != nil {
return err
}
rsaPriv.Precompute()
pk.PrivateKey = rsaPriv
pk.Encrypted = false
pk.encryptedData = nil
return nil
}
func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) {
dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey)
dsaPriv := new(dsa.PrivateKey)
dsaPriv.PublicKey = *dsaPub
buf := bytes.NewBuffer(data)
x, _, err := readMPI(buf)
if err != nil {
return
}
dsaPriv.X = new(big.Int).SetBytes(x)
pk.PrivateKey = dsaPriv
pk.Encrypted = false
pk.encryptedData = nil
return nil
}
func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) {
pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey)
priv := new(elgamal.PrivateKey)
priv.PublicKey = *pub
buf := bytes.NewBuffer(data)
x, _, err := readMPI(buf)
if err != nil {
return
}
priv.X = new(big.Int).SetBytes(x)
pk.PrivateKey = priv
pk.Encrypted = false
pk.encryptedData = nil
return nil
}
func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) {
ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey)
buf := bytes.NewBuffer(data)
d, _, err := readMPI(buf)
if err != nil {
return
}
pk.PrivateKey = &ecdsa.PrivateKey{
PublicKey: *ecdsaPub,
D: new(big.Int).SetBytes(d),
}
pk.Encrypted = false
pk.encryptedData = nil
return nil
}
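
A minimal usage sketch of the exported constructor and Serialize shown above, assuming the vendored package is imported as golang.org/x/crypto/openpgp/packet; error handling is trimmed to the essentials:

package main

import (
	"bytes"
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	// Generate a throwaway RSA key and wrap it in the PrivateKey type above.
	rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	pk := packet.NewRSAPrivateKey(time.Now(), rsaKey)

	// Serialize writes an unencrypted private-key packet (per the TODO above,
	// encrypted serialization is not supported by this vendored version).
	var buf bytes.Buffer
	if err := pk.Serialize(&buf); err != nil {
		panic(err)
	}
	fmt.Printf("private-key packet: %d bytes, key ID %s\n", buf.Len(), pk.KeyIdString())
}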

vendor/golang.org/x/crypto/openpgp/packet/public_key.go generated vendored Normal file

@ -0,0 +1,753 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"bytes"
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"crypto/sha1"
_ "crypto/sha256"
_ "crypto/sha512"
"encoding/binary"
"fmt"
"hash"
"io"
"math/big"
"strconv"
"time"
"golang.org/x/crypto/openpgp/elgamal"
"golang.org/x/crypto/openpgp/errors"
)
var (
// NIST curve P-256
oidCurveP256 []byte = []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}
// NIST curve P-384
oidCurveP384 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x22}
// NIST curve P-521
oidCurveP521 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x23}
)
const maxOIDLength = 8
// ecdsaKey stores the algorithm-specific fields for ECDSA keys.
// as defined in RFC 6637, Section 9.
type ecdsaKey struct {
// oid contains the OID byte sequence identifying the elliptic curve used
oid []byte
// p contains the elliptic curve point that represents the public key
p parsedMPI
}
// parseOID reads the OID for the curve as defined in RFC 6637, Section 9.
func parseOID(r io.Reader) (oid []byte, err error) {
buf := make([]byte, maxOIDLength)
if _, err = readFull(r, buf[:1]); err != nil {
return
}
oidLen := buf[0]
if int(oidLen) > len(buf) {
err = errors.UnsupportedError("invalid oid length: " + strconv.Itoa(int(oidLen)))
return
}
oid = buf[:oidLen]
_, err = readFull(r, oid)
return
}
func (f *ecdsaKey) parse(r io.Reader) (err error) {
if f.oid, err = parseOID(r); err != nil {
return err
}
f.p.bytes, f.p.bitLength, err = readMPI(r)
return
}
func (f *ecdsaKey) serialize(w io.Writer) (err error) {
buf := make([]byte, maxOIDLength+1)
buf[0] = byte(len(f.oid))
copy(buf[1:], f.oid)
if _, err = w.Write(buf[:len(f.oid)+1]); err != nil {
return
}
return writeMPIs(w, f.p)
}
func (f *ecdsaKey) newECDSA() (*ecdsa.PublicKey, error) {
var c elliptic.Curve
if bytes.Equal(f.oid, oidCurveP256) {
c = elliptic.P256()
} else if bytes.Equal(f.oid, oidCurveP384) {
c = elliptic.P384()
} else if bytes.Equal(f.oid, oidCurveP521) {
c = elliptic.P521()
} else {
return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid))
}
x, y := elliptic.Unmarshal(c, f.p.bytes)
if x == nil {
return nil, errors.UnsupportedError("failed to parse EC point")
}
return &ecdsa.PublicKey{Curve: c, X: x, Y: y}, nil
}
func (f *ecdsaKey) byteLen() int {
return 1 + len(f.oid) + 2 + len(f.p.bytes)
}
type kdfHashFunction byte
type kdfAlgorithm byte
// ecdhKdf stores key derivation function parameters
// used for ECDH encryption. See RFC 6637, Section 9.
type ecdhKdf struct {
KdfHash kdfHashFunction
KdfAlgo kdfAlgorithm
}
func (f *ecdhKdf) parse(r io.Reader) (err error) {
buf := make([]byte, 1)
if _, err = readFull(r, buf); err != nil {
return
}
kdfLen := int(buf[0])
if kdfLen < 3 {
return errors.UnsupportedError("Unsupported ECDH KDF length: " + strconv.Itoa(kdfLen))
}
buf = make([]byte, kdfLen)
if _, err = readFull(r, buf); err != nil {
return
}
reserved := int(buf[0])
f.KdfHash = kdfHashFunction(buf[1])
f.KdfAlgo = kdfAlgorithm(buf[2])
if reserved != 0x01 {
return errors.UnsupportedError("Unsupported KDF reserved field: " + strconv.Itoa(reserved))
}
return
}
func (f *ecdhKdf) serialize(w io.Writer) (err error) {
buf := make([]byte, 4)
// See RFC 6637, Section 9, Algorithm-Specific Fields for ECDH keys.
buf[0] = byte(0x03) // Length of the following fields
buf[1] = byte(0x01) // Reserved for future extensions, must be 1 for now
buf[2] = byte(f.KdfHash)
buf[3] = byte(f.KdfAlgo)
_, err = w.Write(buf[:])
return
}
func (f *ecdhKdf) byteLen() int {
return 4
}
// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2.
type PublicKey struct {
CreationTime time.Time
PubKeyAlgo PublicKeyAlgorithm
PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey
Fingerprint [20]byte
KeyId uint64
IsSubkey bool
n, e, p, q, g, y parsedMPI
// RFC 6637 fields
ec *ecdsaKey
ecdh *ecdhKdf
}
// signingKey provides a convenient abstraction over signature verification
// for v3 and v4 public keys.
type signingKey interface {
SerializeSignaturePrefix(io.Writer)
serializeWithoutHeaders(io.Writer) error
}
func fromBig(n *big.Int) parsedMPI {
return parsedMPI{
bytes: n.Bytes(),
bitLength: uint16(n.BitLen()),
}
}
// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey.
func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey {
pk := &PublicKey{
CreationTime: creationTime,
PubKeyAlgo: PubKeyAlgoRSA,
PublicKey: pub,
n: fromBig(pub.N),
e: fromBig(big.NewInt(int64(pub.E))),
}
pk.setFingerPrintAndKeyId()
return pk
}
// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey.
func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey {
pk := &PublicKey{
CreationTime: creationTime,
PubKeyAlgo: PubKeyAlgoDSA,
PublicKey: pub,
p: fromBig(pub.P),
q: fromBig(pub.Q),
g: fromBig(pub.G),
y: fromBig(pub.Y),
}
pk.setFingerPrintAndKeyId()
return pk
}
// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey.
func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey {
pk := &PublicKey{
CreationTime: creationTime,
PubKeyAlgo: PubKeyAlgoElGamal,
PublicKey: pub,
p: fromBig(pub.P),
g: fromBig(pub.G),
y: fromBig(pub.Y),
}
pk.setFingerPrintAndKeyId()
return pk
}
func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey {
pk := &PublicKey{
CreationTime: creationTime,
PubKeyAlgo: PubKeyAlgoECDSA,
PublicKey: pub,
ec: new(ecdsaKey),
}
switch pub.Curve {
case elliptic.P256():
pk.ec.oid = oidCurveP256
case elliptic.P384():
pk.ec.oid = oidCurveP384
case elliptic.P521():
pk.ec.oid = oidCurveP521
default:
panic("unknown elliptic curve")
}
pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y)
// The bit length is 3 (for the 0x04 specifying an uncompressed key)
// plus two field elements (for x and y), which are rounded up to the
// nearest byte. See https://tools.ietf.org/html/rfc6637#section-6
fieldBytes := (pub.Curve.Params().BitSize + 7) & ^7
pk.ec.p.bitLength = uint16(3 + fieldBytes + fieldBytes)
pk.setFingerPrintAndKeyId()
return pk
}
func (pk *PublicKey) parse(r io.Reader) (err error) {
// RFC 4880, section 5.5.2
var buf [6]byte
_, err = readFull(r, buf[:])
if err != nil {
return
}
if buf[0] != 4 {
return errors.UnsupportedError("public key version")
}
pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0)
pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5])
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
err = pk.parseRSA(r)
case PubKeyAlgoDSA:
err = pk.parseDSA(r)
case PubKeyAlgoElGamal:
err = pk.parseElGamal(r)
case PubKeyAlgoECDSA:
pk.ec = new(ecdsaKey)
if err = pk.ec.parse(r); err != nil {
return err
}
pk.PublicKey, err = pk.ec.newECDSA()
case PubKeyAlgoECDH:
pk.ec = new(ecdsaKey)
if err = pk.ec.parse(r); err != nil {
return
}
pk.ecdh = new(ecdhKdf)
if err = pk.ecdh.parse(r); err != nil {
return
}
// The ECDH key is stored in an ecdsa.PublicKey for convenience.
pk.PublicKey, err = pk.ec.newECDSA()
default:
err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo)))
}
if err != nil {
return
}
pk.setFingerPrintAndKeyId()
return
}
func (pk *PublicKey) setFingerPrintAndKeyId() {
// RFC 4880, section 12.2
fingerPrint := sha1.New()
pk.SerializeSignaturePrefix(fingerPrint)
pk.serializeWithoutHeaders(fingerPrint)
copy(pk.Fingerprint[:], fingerPrint.Sum(nil))
pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20])
}
// parseRSA parses RSA public key material from the given Reader. See RFC 4880,
// section 5.5.2.
func (pk *PublicKey) parseRSA(r io.Reader) (err error) {
pk.n.bytes, pk.n.bitLength, err = readMPI(r)
if err != nil {
return
}
pk.e.bytes, pk.e.bitLength, err = readMPI(r)
if err != nil {
return
}
if len(pk.e.bytes) > 3 {
err = errors.UnsupportedError("large public exponent")
return
}
rsa := &rsa.PublicKey{
N: new(big.Int).SetBytes(pk.n.bytes),
E: 0,
}
for i := 0; i < len(pk.e.bytes); i++ {
rsa.E <<= 8
rsa.E |= int(pk.e.bytes[i])
}
pk.PublicKey = rsa
return
}
// parseDSA parses DSA public key material from the given Reader. See RFC 4880,
// section 5.5.2.
func (pk *PublicKey) parseDSA(r io.Reader) (err error) {
pk.p.bytes, pk.p.bitLength, err = readMPI(r)
if err != nil {
return
}
pk.q.bytes, pk.q.bitLength, err = readMPI(r)
if err != nil {
return
}
pk.g.bytes, pk.g.bitLength, err = readMPI(r)
if err != nil {
return
}
pk.y.bytes, pk.y.bitLength, err = readMPI(r)
if err != nil {
return
}
dsa := new(dsa.PublicKey)
dsa.P = new(big.Int).SetBytes(pk.p.bytes)
dsa.Q = new(big.Int).SetBytes(pk.q.bytes)
dsa.G = new(big.Int).SetBytes(pk.g.bytes)
dsa.Y = new(big.Int).SetBytes(pk.y.bytes)
pk.PublicKey = dsa
return
}
// parseElGamal parses ElGamal public key material from the given Reader. See
// RFC 4880, section 5.5.2.
func (pk *PublicKey) parseElGamal(r io.Reader) (err error) {
pk.p.bytes, pk.p.bitLength, err = readMPI(r)
if err != nil {
return
}
pk.g.bytes, pk.g.bitLength, err = readMPI(r)
if err != nil {
return
}
pk.y.bytes, pk.y.bitLength, err = readMPI(r)
if err != nil {
return
}
elgamal := new(elgamal.PublicKey)
elgamal.P = new(big.Int).SetBytes(pk.p.bytes)
elgamal.G = new(big.Int).SetBytes(pk.g.bytes)
elgamal.Y = new(big.Int).SetBytes(pk.y.bytes)
pk.PublicKey = elgamal
return
}
// SerializeSignaturePrefix writes the prefix for this public key to the given Writer.
// The prefix is used when calculating a signature over this public key. See
// RFC 4880, section 5.2.4.
func (pk *PublicKey) SerializeSignaturePrefix(h io.Writer) {
var pLength uint16
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
pLength += 2 + uint16(len(pk.n.bytes))
pLength += 2 + uint16(len(pk.e.bytes))
case PubKeyAlgoDSA:
pLength += 2 + uint16(len(pk.p.bytes))
pLength += 2 + uint16(len(pk.q.bytes))
pLength += 2 + uint16(len(pk.g.bytes))
pLength += 2 + uint16(len(pk.y.bytes))
case PubKeyAlgoElGamal:
pLength += 2 + uint16(len(pk.p.bytes))
pLength += 2 + uint16(len(pk.g.bytes))
pLength += 2 + uint16(len(pk.y.bytes))
case PubKeyAlgoECDSA:
pLength += uint16(pk.ec.byteLen())
case PubKeyAlgoECDH:
pLength += uint16(pk.ec.byteLen())
pLength += uint16(pk.ecdh.byteLen())
default:
panic("unknown public key algorithm")
}
pLength += 6
h.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)})
return
}
func (pk *PublicKey) Serialize(w io.Writer) (err error) {
length := 6 // 6 byte header
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
length += 2 + len(pk.n.bytes)
length += 2 + len(pk.e.bytes)
case PubKeyAlgoDSA:
length += 2 + len(pk.p.bytes)
length += 2 + len(pk.q.bytes)
length += 2 + len(pk.g.bytes)
length += 2 + len(pk.y.bytes)
case PubKeyAlgoElGamal:
length += 2 + len(pk.p.bytes)
length += 2 + len(pk.g.bytes)
length += 2 + len(pk.y.bytes)
case PubKeyAlgoECDSA:
length += pk.ec.byteLen()
case PubKeyAlgoECDH:
length += pk.ec.byteLen()
length += pk.ecdh.byteLen()
default:
panic("unknown public key algorithm")
}
packetType := packetTypePublicKey
if pk.IsSubkey {
packetType = packetTypePublicSubkey
}
err = serializeHeader(w, packetType, length)
if err != nil {
return
}
return pk.serializeWithoutHeaders(w)
}
// serializeWithoutHeaders marshals the PublicKey to w in the form of an
// OpenPGP public key packet, not including the packet header.
func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) {
var buf [6]byte
buf[0] = 4
t := uint32(pk.CreationTime.Unix())
buf[1] = byte(t >> 24)
buf[2] = byte(t >> 16)
buf[3] = byte(t >> 8)
buf[4] = byte(t)
buf[5] = byte(pk.PubKeyAlgo)
_, err = w.Write(buf[:])
if err != nil {
return
}
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
return writeMPIs(w, pk.n, pk.e)
case PubKeyAlgoDSA:
return writeMPIs(w, pk.p, pk.q, pk.g, pk.y)
case PubKeyAlgoElGamal:
return writeMPIs(w, pk.p, pk.g, pk.y)
case PubKeyAlgoECDSA:
return pk.ec.serialize(w)
case PubKeyAlgoECDH:
if err = pk.ec.serialize(w); err != nil {
return
}
return pk.ecdh.serialize(w)
}
return errors.InvalidArgumentError("bad public-key algorithm")
}
// CanSign returns true iff this public key can generate signatures
func (pk *PublicKey) CanSign() bool {
return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal
}
// VerifySignature returns nil iff sig is a valid signature, made by this
// public key, of the data hashed into signed. signed is mutated by this call.
func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) {
if !pk.CanSign() {
return errors.InvalidArgumentError("public key cannot generate signatures")
}
signed.Write(sig.HashSuffix)
hashBytes := signed.Sum(nil)
if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
return errors.SignatureError("hash tag doesn't match")
}
if pk.PubKeyAlgo != sig.PubKeyAlgo {
return errors.InvalidArgumentError("public key and signature use different algorithms")
}
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey)
err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes))
if err != nil {
return errors.SignatureError("RSA verification failure")
}
return nil
case PubKeyAlgoDSA:
dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey)
// Need to truncate hashBytes to match FIPS 186-3 section 4.6.
subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8
if len(hashBytes) > subgroupSize {
hashBytes = hashBytes[:subgroupSize]
}
if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) {
return errors.SignatureError("DSA verification failure")
}
return nil
case PubKeyAlgoECDSA:
ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey)
if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.bytes), new(big.Int).SetBytes(sig.ECDSASigS.bytes)) {
return errors.SignatureError("ECDSA verification failure")
}
return nil
default:
return errors.SignatureError("Unsupported public key algorithm used in signature")
}
}
// VerifySignatureV3 returns nil iff sig is a valid signature, made by this
// public key, of the data hashed into signed. signed is mutated by this call.
func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) {
if !pk.CanSign() {
return errors.InvalidArgumentError("public key cannot generate signatures")
}
suffix := make([]byte, 5)
suffix[0] = byte(sig.SigType)
binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix()))
signed.Write(suffix)
hashBytes := signed.Sum(nil)
if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
return errors.SignatureError("hash tag doesn't match")
}
if pk.PubKeyAlgo != sig.PubKeyAlgo {
return errors.InvalidArgumentError("public key and signature use different algorithms")
}
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
rsaPublicKey := pk.PublicKey.(*rsa.PublicKey)
if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)); err != nil {
return errors.SignatureError("RSA verification failure")
}
return
case PubKeyAlgoDSA:
dsaPublicKey := pk.PublicKey.(*dsa.PublicKey)
// Need to truncate hashBytes to match FIPS 186-3 section 4.6.
subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8
if len(hashBytes) > subgroupSize {
hashBytes = hashBytes[:subgroupSize]
}
if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) {
return errors.SignatureError("DSA verification failure")
}
return nil
default:
panic("shouldn't happen")
}
}
// keySignatureHash returns a Hash of the message that needs to be signed for
// pk to assert a subkey relationship to signed.
func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
if !hashFunc.Available() {
return nil, errors.UnsupportedError("hash function")
}
h = hashFunc.New()
// RFC 4880, section 5.2.4
pk.SerializeSignaturePrefix(h)
pk.serializeWithoutHeaders(h)
signed.SerializeSignaturePrefix(h)
signed.serializeWithoutHeaders(h)
return
}
// VerifyKeySignature returns nil iff sig is a valid signature, made by this
// public key, of signed.
func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error {
h, err := keySignatureHash(pk, signed, sig.Hash)
if err != nil {
return err
}
if err = pk.VerifySignature(h, sig); err != nil {
return err
}
if sig.FlagSign {
// Signing subkeys must be cross-signed. See
// https://www.gnupg.org/faq/subkey-cross-certify.html.
if sig.EmbeddedSignature == nil {
return errors.StructuralError("signing subkey is missing cross-signature")
}
// Verify the cross-signature. This is calculated over the same
// data as the main signature, so we cannot just recursively
// call signed.VerifyKeySignature(...)
if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil {
return errors.StructuralError("error while hashing for cross-signature: " + err.Error())
}
if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil {
return errors.StructuralError("error while verifying cross-signature: " + err.Error())
}
}
return nil
}
func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
if !hashFunc.Available() {
return nil, errors.UnsupportedError("hash function")
}
h = hashFunc.New()
// RFC 4880, section 5.2.4
pk.SerializeSignaturePrefix(h)
pk.serializeWithoutHeaders(h)
return
}
// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this
// public key.
func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) {
h, err := keyRevocationHash(pk, sig.Hash)
if err != nil {
return err
}
return pk.VerifySignature(h, sig)
}
// userIdSignatureHash returns a Hash of the message that needs to be signed
// to assert that pk is a valid key for id.
func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
if !hashFunc.Available() {
return nil, errors.UnsupportedError("hash function")
}
h = hashFunc.New()
// RFC 4880, section 5.2.4
pk.SerializeSignaturePrefix(h)
pk.serializeWithoutHeaders(h)
var buf [5]byte
buf[0] = 0xb4
buf[1] = byte(len(id) >> 24)
buf[2] = byte(len(id) >> 16)
buf[3] = byte(len(id) >> 8)
buf[4] = byte(len(id))
h.Write(buf[:])
h.Write([]byte(id))
return
}
// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this
// public key, that id is the identity of pub.
func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) {
h, err := userIdSignatureHash(id, pub, sig.Hash)
if err != nil {
return err
}
return pk.VerifySignature(h, sig)
}
// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this
// public key, that id is the identity of pub.
func (pk *PublicKey) VerifyUserIdSignatureV3(id string, pub *PublicKey, sig *SignatureV3) (err error) {
h, err := userIdSignatureV3Hash(id, pub, sig.Hash)
if err != nil {
return err
}
return pk.VerifySignatureV3(h, sig)
}
// KeyIdString returns the public key's fingerprint in capital hex
// (e.g. "6C7EE1B8621CC013").
func (pk *PublicKey) KeyIdString() string {
return fmt.Sprintf("%X", pk.Fingerprint[12:20])
}
// KeyIdShortString returns the short form of public key's fingerprint
// in capital hex, as shown by gpg --list-keys (e.g. "621CC013").
func (pk *PublicKey) KeyIdShortString() string {
return fmt.Sprintf("%X", pk.Fingerprint[16:20])
}
// A parsedMPI is used to store the contents of a big integer, along with the
// bit length that was specified in the original input. This allows the MPI to
// be reserialized exactly.
type parsedMPI struct {
bytes []byte
bitLength uint16
}
// writeMPIs is a utility function for serializing several big integers to the
// given Writer.
func writeMPIs(w io.Writer, mpis ...parsedMPI) (err error) {
for _, mpi := range mpis {
err = writeMPI(w, mpi.bitLength, mpi.bytes)
if err != nil {
return
}
}
return
}
// BitLength returns the bit length for the given public key.
func (pk *PublicKey) BitLength() (bitLength uint16, err error) {
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
bitLength = pk.n.bitLength
case PubKeyAlgoDSA:
bitLength = pk.p.bitLength
case PubKeyAlgoElGamal:
bitLength = pk.p.bitLength
default:
err = errors.InvalidArgumentError("bad public-key algorithm")
}
return
}
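
A short sketch of the PublicKey helpers above (fingerprint, key ID and bit length), again assuming the import path golang.org/x/crypto/openpgp/packet:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	pub := packet.NewRSAPublicKey(time.Now(), &rsaKey.PublicKey)

	bits, _ := pub.BitLength()
	fmt.Printf("fingerprint: %X\n", pub.Fingerprint) // SHA-1 over the serialized key material
	fmt.Printf("key ID: %s (short: %s), %d bits\n",
		pub.KeyIdString(), pub.KeyIdShortString(), bits)
}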


@ -0,0 +1,279 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"crypto"
"crypto/md5"
"crypto/rsa"
"encoding/binary"
"fmt"
"hash"
"io"
"math/big"
"strconv"
"time"
"golang.org/x/crypto/openpgp/errors"
)
// PublicKeyV3 represents older, version 3 public keys. These keys are less secure and
// should not be used for signing or encrypting. They are supported here only for
// parsing version 3 key material and validating signatures.
// See RFC 4880, section 5.5.2.
type PublicKeyV3 struct {
CreationTime time.Time
DaysToExpire uint16
PubKeyAlgo PublicKeyAlgorithm
PublicKey *rsa.PublicKey
Fingerprint [16]byte
KeyId uint64
IsSubkey bool
n, e parsedMPI
}
// newRSAPublicKeyV3 returns a PublicKey that wraps the given rsa.PublicKey.
// Included here for testing purposes only. RFC 4880, section 5.5.2:
// "an implementation MUST NOT generate a V3 key, but MAY accept it."
func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 {
pk := &PublicKeyV3{
CreationTime: creationTime,
PublicKey: pub,
n: fromBig(pub.N),
e: fromBig(big.NewInt(int64(pub.E))),
}
pk.setFingerPrintAndKeyId()
return pk
}
func (pk *PublicKeyV3) parse(r io.Reader) (err error) {
// RFC 4880, section 5.5.2
var buf [8]byte
if _, err = readFull(r, buf[:]); err != nil {
return
}
if buf[0] < 2 || buf[0] > 3 {
return errors.UnsupportedError("public key version")
}
pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0)
pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7])
pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7])
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
err = pk.parseRSA(r)
default:
err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo)))
}
if err != nil {
return
}
pk.setFingerPrintAndKeyId()
return
}
func (pk *PublicKeyV3) setFingerPrintAndKeyId() {
// RFC 4880, section 12.2
fingerPrint := md5.New()
fingerPrint.Write(pk.n.bytes)
fingerPrint.Write(pk.e.bytes)
fingerPrint.Sum(pk.Fingerprint[:0])
pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:])
}
// parseRSA parses RSA public key material from the given Reader. See RFC 4880,
// section 5.5.2.
func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) {
if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil {
return
}
if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil {
return
}
// RFC 4880 Section 12.2 requires the low 8 bytes of the
// modulus to form the key id.
if len(pk.n.bytes) < 8 {
return errors.StructuralError("v3 public key modulus is too short")
}
if len(pk.e.bytes) > 3 {
err = errors.UnsupportedError("large public exponent")
return
}
rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)}
for i := 0; i < len(pk.e.bytes); i++ {
rsa.E <<= 8
rsa.E |= int(pk.e.bytes[i])
}
pk.PublicKey = rsa
return
}
// SerializeSignaturePrefix writes the prefix for this public key to the given Writer.
// The prefix is used when calculating a signature over this public key. See
// RFC 4880, section 5.2.4.
func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) {
var pLength uint16
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
pLength += 2 + uint16(len(pk.n.bytes))
pLength += 2 + uint16(len(pk.e.bytes))
default:
panic("unknown public key algorithm")
}
pLength += 6
w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)})
return
}
func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) {
length := 8 // 8 byte header
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
length += 2 + len(pk.n.bytes)
length += 2 + len(pk.e.bytes)
default:
panic("unknown public key algorithm")
}
packetType := packetTypePublicKey
if pk.IsSubkey {
packetType = packetTypePublicSubkey
}
if err = serializeHeader(w, packetType, length); err != nil {
return
}
return pk.serializeWithoutHeaders(w)
}
// serializeWithoutHeaders marshals the PublicKey to w in the form of an
// OpenPGP public key packet, not including the packet header.
func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) {
var buf [8]byte
// Version 3
buf[0] = 3
// Creation time
t := uint32(pk.CreationTime.Unix())
buf[1] = byte(t >> 24)
buf[2] = byte(t >> 16)
buf[3] = byte(t >> 8)
buf[4] = byte(t)
// Days to expire
buf[5] = byte(pk.DaysToExpire >> 8)
buf[6] = byte(pk.DaysToExpire)
// Public key algorithm
buf[7] = byte(pk.PubKeyAlgo)
if _, err = w.Write(buf[:]); err != nil {
return
}
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
return writeMPIs(w, pk.n, pk.e)
}
return errors.InvalidArgumentError("bad public-key algorithm")
}
// CanSign returns true iff this public key can generate signatures
func (pk *PublicKeyV3) CanSign() bool {
return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly
}
// VerifySignatureV3 returns nil iff sig is a valid signature, made by this
// public key, of the data hashed into signed. signed is mutated by this call.
func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) {
if !pk.CanSign() {
return errors.InvalidArgumentError("public key cannot generate signatures")
}
suffix := make([]byte, 5)
suffix[0] = byte(sig.SigType)
binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix()))
signed.Write(suffix)
hashBytes := signed.Sum(nil)
if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
return errors.SignatureError("hash tag doesn't match")
}
if pk.PubKeyAlgo != sig.PubKeyAlgo {
return errors.InvalidArgumentError("public key and signature use different algorithms")
}
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil {
return errors.SignatureError("RSA verification failure")
}
return
default:
// V3 public keys only support RSA.
panic("shouldn't happen")
}
}
// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this
// public key, that id is the identity of pub.
func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) {
h, err := userIdSignatureV3Hash(id, pk, sig.Hash)
if err != nil {
return err
}
return pk.VerifySignatureV3(h, sig)
}
// VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this
// public key, of signed.
func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) {
h, err := keySignatureHash(pk, signed, sig.Hash)
if err != nil {
return err
}
return pk.VerifySignatureV3(h, sig)
}
// userIdSignatureV3Hash returns a Hash of the message that needs to be signed
// to assert that pk is a valid key for id.
func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) {
if !hfn.Available() {
return nil, errors.UnsupportedError("hash function")
}
h = hfn.New()
// RFC 4880, section 5.2.4
pk.SerializeSignaturePrefix(h)
pk.serializeWithoutHeaders(h)
h.Write([]byte(id))
return
}
// KeyIdString returns the public key's fingerprint in capital hex
// (e.g. "6C7EE1B8621CC013").
func (pk *PublicKeyV3) KeyIdString() string {
return fmt.Sprintf("%X", pk.KeyId)
}
// KeyIdShortString returns the short form of public key's fingerprint
// in capital hex, as shown by gpg --list-keys (e.g. "621CC013").
func (pk *PublicKeyV3) KeyIdShortString() string {
return fmt.Sprintf("%X", pk.KeyId&0xFFFFFFFF)
}
// BitLength returns the bit length for the given public key.
func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) {
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
bitLength = pk.n.bitLength
default:
err = errors.InvalidArgumentError("bad public-key algorithm")
}
return
}
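
A standalone sketch mirroring setFingerPrintAndKeyId above for V3 keys: the fingerprint is MD5 over the modulus and exponent bytes, and the key ID is the low 64 bits of the modulus. The byte values below are toy data for illustration, not a real key:

package main

import (
	"crypto/md5"
	"encoding/binary"
	"fmt"
)

func main() {
	nBytes := []byte{0xC1, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09} // toy modulus bytes
	eBytes := []byte{0x01, 0x00, 0x01}                                     // e = 65537

	// V3 fingerprint: MD5(n || e); V3 key ID: low 8 bytes of the modulus.
	sum := md5.Sum(append(append([]byte{}, nBytes...), eBytes...))
	keyID := binary.BigEndian.Uint64(nBytes[len(nBytes)-8:])

	fmt.Printf("v3 fingerprint: %x\n", sum)
	fmt.Printf("v3 key ID: %X\n", keyID)
}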

vendor/golang.org/x/crypto/openpgp/packet/reader.go generated vendored Normal file

@ -0,0 +1,76 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"golang.org/x/crypto/openpgp/errors"
"io"
)
// Reader reads packets from an io.Reader and allows packets to be 'unread' so
// that they result from the next call to Next.
type Reader struct {
q []Packet
readers []io.Reader
}
// New io.Readers are pushed when a compressed or encrypted packet is processed
// and recursively treated as a new source of packets. However, a carefully
// crafted packet can trigger an infinite recursive sequence of packets. See
// http://mumble.net/~campbell/misc/pgp-quine
// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402
// This constant limits the number of recursive packets that may be pushed.
const maxReaders = 32
// Next returns the most recently unread Packet, or reads another packet from
// the top-most io.Reader. Unknown packet types are skipped.
func (r *Reader) Next() (p Packet, err error) {
if len(r.q) > 0 {
p = r.q[len(r.q)-1]
r.q = r.q[:len(r.q)-1]
return
}
for len(r.readers) > 0 {
p, err = Read(r.readers[len(r.readers)-1])
if err == nil {
return
}
if err == io.EOF {
r.readers = r.readers[:len(r.readers)-1]
continue
}
if _, ok := err.(errors.UnknownPacketTypeError); !ok {
return nil, err
}
}
return nil, io.EOF
}
// Push causes the Reader to start reading from a new io.Reader. When an EOF
// error is seen from the new io.Reader, it is popped and the Reader continues
// to read from the next most recent io.Reader. Push returns a StructuralError
// if pushing the reader would exceed the maximum recursion level, otherwise it
// returns nil.
func (r *Reader) Push(reader io.Reader) (err error) {
if len(r.readers) >= maxReaders {
return errors.StructuralError("too many layers of packets")
}
r.readers = append(r.readers, reader)
return nil
}
// Unread causes the given Packet to be returned from the next call to Next.
func (r *Reader) Unread(p Packet) {
r.q = append(r.q, p)
}
func NewReader(r io.Reader) *Reader {
return &Reader{
q: nil,
readers: []io.Reader{r},
}
}
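
A minimal round-trip sketch for the Reader above: serialize one public-key packet into a buffer, then drain it with Next until io.EOF. It assumes the import path golang.org/x/crypto/openpgp/packet:

package main

import (
	"bytes"
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"io"
	"time"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	// Write one public-key packet into a buffer...
	rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	var buf bytes.Buffer
	if err := packet.NewRSAPublicKey(time.Now(), &rsaKey.PublicKey).Serialize(&buf); err != nil {
		panic(err)
	}

	// ...then read it back. Next skips unknown packet types and returns
	// io.EOF once all pushed readers are exhausted.
	r := packet.NewReader(&buf)
	for {
		p, err := r.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("read packet of type %T\n", p)
	}
}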

vendor/golang.org/x/crypto/openpgp/packet/signature.go generated vendored Normal file

@ -0,0 +1,731 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"bytes"
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"encoding/asn1"
"encoding/binary"
"hash"
"io"
"math/big"
"strconv"
"time"
"golang.org/x/crypto/openpgp/errors"
"golang.org/x/crypto/openpgp/s2k"
)
const (
// See RFC 4880, section 5.2.3.21 for details.
KeyFlagCertify = 1 << iota
KeyFlagSign
KeyFlagEncryptCommunications
KeyFlagEncryptStorage
)
// Signature represents a signature. See RFC 4880, section 5.2.
type Signature struct {
SigType SignatureType
PubKeyAlgo PublicKeyAlgorithm
Hash crypto.Hash
// HashSuffix is extra data that is hashed in after the signed data.
HashSuffix []byte
// HashTag contains the first two bytes of the hash for fast rejection
// of bad signed data.
HashTag [2]byte
CreationTime time.Time
RSASignature parsedMPI
DSASigR, DSASigS parsedMPI
ECDSASigR, ECDSASigS parsedMPI
// rawSubpackets contains the unparsed subpackets, in order.
rawSubpackets []outputSubpacket
// The following are optional so are nil when not included in the
// signature.
SigLifetimeSecs, KeyLifetimeSecs *uint32
PreferredSymmetric, PreferredHash, PreferredCompression []uint8
IssuerKeyId *uint64
IsPrimaryId *bool
// FlagsValid is set if any flags were given. See RFC 4880, section
// 5.2.3.21 for details.
FlagsValid bool
FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool
// RevocationReason is set if this signature has been revoked.
// See RFC 4880, section 5.2.3.23 for details.
RevocationReason *uint8
RevocationReasonText string
// MDC is set if this signature has a feature packet that indicates
// support for MDC subpackets.
MDC bool
// EmbeddedSignature, if non-nil, is a signature of the parent key, by
// this key. This prevents an attacker from claiming another's signing
// subkey as their own.
EmbeddedSignature *Signature
outSubpackets []outputSubpacket
}
func (sig *Signature) parse(r io.Reader) (err error) {
// RFC 4880, section 5.2.3
var buf [5]byte
_, err = readFull(r, buf[:1])
if err != nil {
return
}
if buf[0] != 4 {
err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0])))
return
}
_, err = readFull(r, buf[:5])
if err != nil {
return
}
sig.SigType = SignatureType(buf[0])
sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1])
switch sig.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA:
default:
err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
return
}
var ok bool
sig.Hash, ok = s2k.HashIdToHash(buf[2])
if !ok {
return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2])))
}
hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4])
l := 6 + hashedSubpacketsLength
sig.HashSuffix = make([]byte, l+6)
sig.HashSuffix[0] = 4
copy(sig.HashSuffix[1:], buf[:5])
hashedSubpackets := sig.HashSuffix[6:l]
_, err = readFull(r, hashedSubpackets)
if err != nil {
return
}
// See RFC 4880, section 5.2.4
trailer := sig.HashSuffix[l:]
trailer[0] = 4
trailer[1] = 0xff
trailer[2] = uint8(l >> 24)
trailer[3] = uint8(l >> 16)
trailer[4] = uint8(l >> 8)
trailer[5] = uint8(l)
err = parseSignatureSubpackets(sig, hashedSubpackets, true)
if err != nil {
return
}
_, err = readFull(r, buf[:2])
if err != nil {
return
}
unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1])
unhashedSubpackets := make([]byte, unhashedSubpacketsLength)
_, err = readFull(r, unhashedSubpackets)
if err != nil {
return
}
err = parseSignatureSubpackets(sig, unhashedSubpackets, false)
if err != nil {
return
}
_, err = readFull(r, sig.HashTag[:2])
if err != nil {
return
}
switch sig.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r)
case PubKeyAlgoDSA:
sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r)
if err == nil {
sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r)
}
case PubKeyAlgoECDSA:
sig.ECDSASigR.bytes, sig.ECDSASigR.bitLength, err = readMPI(r)
if err == nil {
sig.ECDSASigS.bytes, sig.ECDSASigS.bitLength, err = readMPI(r)
}
default:
panic("unreachable")
}
return
}
// parseSignatureSubpackets parses subpackets of the main signature packet. See
// RFC 4880, section 5.2.3.1.
func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) {
for len(subpackets) > 0 {
subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed)
if err != nil {
return
}
}
if sig.CreationTime.IsZero() {
err = errors.StructuralError("no creation time in signature")
}
return
}
type signatureSubpacketType uint8
const (
creationTimeSubpacket signatureSubpacketType = 2
signatureExpirationSubpacket signatureSubpacketType = 3
keyExpirationSubpacket signatureSubpacketType = 9
prefSymmetricAlgosSubpacket signatureSubpacketType = 11
issuerSubpacket signatureSubpacketType = 16
prefHashAlgosSubpacket signatureSubpacketType = 21
prefCompressionSubpacket signatureSubpacketType = 22
primaryUserIdSubpacket signatureSubpacketType = 25
keyFlagsSubpacket signatureSubpacketType = 27
reasonForRevocationSubpacket signatureSubpacketType = 29
featuresSubpacket signatureSubpacketType = 30
embeddedSignatureSubpacket signatureSubpacketType = 32
)
// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1.
func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) {
// RFC 4880, section 5.2.3.1
var (
length uint32
packetType signatureSubpacketType
isCritical bool
)
switch {
case subpacket[0] < 192:
length = uint32(subpacket[0])
subpacket = subpacket[1:]
case subpacket[0] < 255:
if len(subpacket) < 2 {
goto Truncated
}
length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192
subpacket = subpacket[2:]
default:
if len(subpacket) < 5 {
goto Truncated
}
length = uint32(subpacket[1])<<24 |
uint32(subpacket[2])<<16 |
uint32(subpacket[3])<<8 |
uint32(subpacket[4])
subpacket = subpacket[5:]
}
if length > uint32(len(subpacket)) {
goto Truncated
}
rest = subpacket[length:]
subpacket = subpacket[:length]
if len(subpacket) == 0 {
err = errors.StructuralError("zero length signature subpacket")
return
}
packetType = signatureSubpacketType(subpacket[0] & 0x7f)
isCritical = subpacket[0]&0x80 == 0x80
subpacket = subpacket[1:]
sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket})
switch packetType {
case creationTimeSubpacket:
if !isHashed {
err = errors.StructuralError("signature creation time in non-hashed area")
return
}
if len(subpacket) != 4 {
err = errors.StructuralError("signature creation time not four bytes")
return
}
t := binary.BigEndian.Uint32(subpacket)
sig.CreationTime = time.Unix(int64(t), 0)
case signatureExpirationSubpacket:
// Signature expiration time, section 5.2.3.10
if !isHashed {
return
}
if len(subpacket) != 4 {
err = errors.StructuralError("expiration subpacket with bad length")
return
}
sig.SigLifetimeSecs = new(uint32)
*sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket)
case keyExpirationSubpacket:
// Key expiration time, section 5.2.3.6
if !isHashed {
return
}
if len(subpacket) != 4 {
err = errors.StructuralError("key expiration subpacket with bad length")
return
}
sig.KeyLifetimeSecs = new(uint32)
*sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket)
case prefSymmetricAlgosSubpacket:
// Preferred symmetric algorithms, section 5.2.3.7
if !isHashed {
return
}
sig.PreferredSymmetric = make([]byte, len(subpacket))
copy(sig.PreferredSymmetric, subpacket)
case issuerSubpacket:
// Issuer, section 5.2.3.5
if len(subpacket) != 8 {
err = errors.StructuralError("issuer subpacket with bad length")
return
}
sig.IssuerKeyId = new(uint64)
*sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket)
case prefHashAlgosSubpacket:
// Preferred hash algorithms, section 5.2.3.8
if !isHashed {
return
}
sig.PreferredHash = make([]byte, len(subpacket))
copy(sig.PreferredHash, subpacket)
case prefCompressionSubpacket:
// Preferred compression algorithms, section 5.2.3.9
if !isHashed {
return
}
sig.PreferredCompression = make([]byte, len(subpacket))
copy(sig.PreferredCompression, subpacket)
case primaryUserIdSubpacket:
// Primary User ID, section 5.2.3.19
if !isHashed {
return
}
if len(subpacket) != 1 {
err = errors.StructuralError("primary user id subpacket with bad length")
return
}
sig.IsPrimaryId = new(bool)
if subpacket[0] > 0 {
*sig.IsPrimaryId = true
}
case keyFlagsSubpacket:
// Key flags, section 5.2.3.21
if !isHashed {
return
}
if len(subpacket) == 0 {
err = errors.StructuralError("empty key flags subpacket")
return
}
sig.FlagsValid = true
if subpacket[0]&KeyFlagCertify != 0 {
sig.FlagCertify = true
}
if subpacket[0]&KeyFlagSign != 0 {
sig.FlagSign = true
}
if subpacket[0]&KeyFlagEncryptCommunications != 0 {
sig.FlagEncryptCommunications = true
}
if subpacket[0]&KeyFlagEncryptStorage != 0 {
sig.FlagEncryptStorage = true
}
case reasonForRevocationSubpacket:
// Reason For Revocation, section 5.2.3.23
if !isHashed {
return
}
if len(subpacket) == 0 {
err = errors.StructuralError("empty revocation reason subpacket")
return
}
sig.RevocationReason = new(uint8)
*sig.RevocationReason = subpacket[0]
sig.RevocationReasonText = string(subpacket[1:])
case featuresSubpacket:
// Features subpacket, section 5.2.3.24 specifies a very general
// mechanism for OpenPGP implementations to signal support for new
// features. In practice, the subpacket is used exclusively to
// indicate support for MDC-protected encryption.
sig.MDC = len(subpacket) >= 1 && subpacket[0]&1 == 1
case embeddedSignatureSubpacket:
// Only usage is in signatures that cross-certify
// signing subkeys. section 5.2.3.26 describes the
// format, with its usage described in section 11.1
if sig.EmbeddedSignature != nil {
err = errors.StructuralError("Cannot have multiple embedded signatures")
return
}
sig.EmbeddedSignature = new(Signature)
// Embedded signatures are required to be v4 signatures see
// section 12.1. However, we only parse v4 signatures in this
// file anyway.
if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil {
return nil, err
}
if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding {
return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType)))
}
default:
if isCritical {
err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType)))
return
}
}
return
Truncated:
err = errors.StructuralError("signature subpacket truncated")
return
}
// subpacketLengthLength returns the length, in bytes, of an encoded length value.
func subpacketLengthLength(length int) int {
if length < 192 {
return 1
}
if length < 16320 {
return 2
}
return 5
}
// serializeSubpacketLength marshals the given length into to.
func serializeSubpacketLength(to []byte, length int) int {
// RFC 4880, Section 4.2.2.
if length < 192 {
to[0] = byte(length)
return 1
}
if length < 16320 {
length -= 192
to[0] = byte((length >> 8) + 192)
to[1] = byte(length)
return 2
}
to[0] = 255
to[1] = byte(length >> 24)
to[2] = byte(length >> 16)
to[3] = byte(length >> 8)
to[4] = byte(length)
return 5
}
// subpacketsLength returns the serialized length, in bytes, of the given
// subpackets.
func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) {
for _, subpacket := range subpackets {
if subpacket.hashed == hashed {
length += subpacketLengthLength(len(subpacket.contents) + 1)
length += 1 // type byte
length += len(subpacket.contents)
}
}
return
}
// serializeSubpackets marshals the given subpackets into to.
func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) {
for _, subpacket := range subpackets {
if subpacket.hashed == hashed {
n := serializeSubpacketLength(to, len(subpacket.contents)+1)
to[n] = byte(subpacket.subpacketType)
to = to[1+n:]
n = copy(to, subpacket.contents)
to = to[n:]
}
}
return
}
// KeyExpired returns whether sig is a self-signature of a key that has
// expired.
func (sig *Signature) KeyExpired(currentTime time.Time) bool {
if sig.KeyLifetimeSecs == nil {
return false
}
expiry := sig.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second)
return currentTime.After(expiry)
}
// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing.
func (sig *Signature) buildHashSuffix() (err error) {
hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true)
var ok bool
l := 6 + hashedSubpacketsLen
sig.HashSuffix = make([]byte, l+6)
sig.HashSuffix[0] = 4
sig.HashSuffix[1] = uint8(sig.SigType)
sig.HashSuffix[2] = uint8(sig.PubKeyAlgo)
sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash)
if !ok {
sig.HashSuffix = nil
return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash)))
}
sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8)
sig.HashSuffix[5] = byte(hashedSubpacketsLen)
serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true)
trailer := sig.HashSuffix[l:]
trailer[0] = 4
trailer[1] = 0xff
trailer[2] = byte(l >> 24)
trailer[3] = byte(l >> 16)
trailer[4] = byte(l >> 8)
trailer[5] = byte(l)
return
}
func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) {
err = sig.buildHashSuffix()
if err != nil {
return
}
h.Write(sig.HashSuffix)
digest = h.Sum(nil)
copy(sig.HashTag[:], digest)
return
}
// Sign signs a message with a private key. The hash, h, must contain
// the hash of the message to be signed and will be mutated by this function.
// On success, the signature is stored in sig. Call Serialize to write it out.
// If config is nil, sensible defaults will be used.
func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) {
sig.outSubpackets = sig.buildSubpackets()
digest, err := sig.signPrepareHash(h)
if err != nil {
return
}
switch priv.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
// supports both *rsa.PrivateKey and crypto.Signer
sig.RSASignature.bytes, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash)
sig.RSASignature.bitLength = uint16(8 * len(sig.RSASignature.bytes))
case PubKeyAlgoDSA:
dsaPriv := priv.PrivateKey.(*dsa.PrivateKey)
// Need to truncate hashBytes to match FIPS 186-3 section 4.6.
subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8
if len(digest) > subgroupSize {
digest = digest[:subgroupSize]
}
r, s, err := dsa.Sign(config.Random(), dsaPriv, digest)
if err == nil {
sig.DSASigR.bytes = r.Bytes()
sig.DSASigR.bitLength = uint16(8 * len(sig.DSASigR.bytes))
sig.DSASigS.bytes = s.Bytes()
sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes))
}
case PubKeyAlgoECDSA:
var r, s *big.Int
if pk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok {
// direct support, avoid asn1 wrapping/unwrapping
r, s, err = ecdsa.Sign(config.Random(), pk, digest)
} else {
var b []byte
b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, nil)
if err == nil {
r, s, err = unwrapECDSASig(b)
}
}
if err == nil {
sig.ECDSASigR = fromBig(r)
sig.ECDSASigS = fromBig(s)
}
default:
err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo)))
}
return
}
// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA
// signature.
func unwrapECDSASig(b []byte) (r, s *big.Int, err error) {
var ecsdaSig struct {
R, S *big.Int
}
_, err = asn1.Unmarshal(b, &ecsdaSig)
if err != nil {
return
}
return ecsdaSig.R, ecsdaSig.S, nil
}
// SignUserId computes a signature from priv, asserting that pub is a valid
// key for the identity id. On success, the signature is stored in sig. Call
// Serialize to write it out.
// If config is nil, sensible defaults will be used.
func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error {
h, err := userIdSignatureHash(id, pub, sig.Hash)
if err != nil {
return err
}
return sig.Sign(h, priv, config)
}
// SignKey computes a signature from priv, asserting that pub is a subkey. On
// success, the signature is stored in sig. Call Serialize to write it out.
// If config is nil, sensible defaults will be used.
func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error {
h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash)
if err != nil {
return err
}
return sig.Sign(h, priv, config)
}
// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been
// called first.
func (sig *Signature) Serialize(w io.Writer) (err error) {
if len(sig.outSubpackets) == 0 {
sig.outSubpackets = sig.rawSubpackets
}
if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil && sig.ECDSASigR.bytes == nil {
return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize")
}
sigLength := 0
switch sig.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
sigLength = 2 + len(sig.RSASignature.bytes)
case PubKeyAlgoDSA:
sigLength = 2 + len(sig.DSASigR.bytes)
sigLength += 2 + len(sig.DSASigS.bytes)
case PubKeyAlgoECDSA:
sigLength = 2 + len(sig.ECDSASigR.bytes)
sigLength += 2 + len(sig.ECDSASigS.bytes)
default:
panic("impossible")
}
unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false)
length := len(sig.HashSuffix) - 6 /* trailer not included */ +
2 /* length of unhashed subpackets */ + unhashedSubpacketsLen +
2 /* hash tag */ + sigLength
err = serializeHeader(w, packetTypeSignature, length)
if err != nil {
return
}
_, err = w.Write(sig.HashSuffix[:len(sig.HashSuffix)-6])
if err != nil {
return
}
unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen)
unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8)
unhashedSubpackets[1] = byte(unhashedSubpacketsLen)
serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false)
_, err = w.Write(unhashedSubpackets)
if err != nil {
return
}
_, err = w.Write(sig.HashTag[:])
if err != nil {
return
}
switch sig.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
err = writeMPIs(w, sig.RSASignature)
case PubKeyAlgoDSA:
err = writeMPIs(w, sig.DSASigR, sig.DSASigS)
case PubKeyAlgoECDSA:
err = writeMPIs(w, sig.ECDSASigR, sig.ECDSASigS)
default:
panic("impossible")
}
return
}
// outputSubpacket represents a subpacket to be marshaled.
type outputSubpacket struct {
hashed bool // true if this subpacket is in the hashed area.
subpacketType signatureSubpacketType
isCritical bool
contents []byte
}
func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) {
creationTime := make([]byte, 4)
binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix()))
subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime})
if sig.IssuerKeyId != nil {
keyId := make([]byte, 8)
binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId)
subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId})
}
if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 {
sigLifetime := make([]byte, 4)
binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs)
subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime})
}
// Key flags may only appear in self-signatures or certification signatures.
if sig.FlagsValid {
var flags byte
if sig.FlagCertify {
flags |= KeyFlagCertify
}
if sig.FlagSign {
flags |= KeyFlagSign
}
if sig.FlagEncryptCommunications {
flags |= KeyFlagEncryptCommunications
}
if sig.FlagEncryptStorage {
flags |= KeyFlagEncryptStorage
}
subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}})
}
// The following subpackets may only appear in self-signatures
if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 {
keyLifetime := make([]byte, 4)
binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs)
subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime})
}
if sig.IsPrimaryId != nil && *sig.IsPrimaryId {
subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}})
}
if len(sig.PreferredSymmetric) > 0 {
subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric})
}
if len(sig.PreferredHash) > 0 {
subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash})
}
if len(sig.PreferredCompression) > 0 {
subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression})
}
return
}

View File

@ -0,0 +1,146 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"crypto"
"encoding/binary"
"fmt"
"io"
"strconv"
"time"
"golang.org/x/crypto/openpgp/errors"
"golang.org/x/crypto/openpgp/s2k"
)
// SignatureV3 represents older version 3 signatures. These signatures are less secure
// than version 4 and should not be used to create new signatures. They are included
// here for backwards compatibility, to read and validate signatures made with older key material.
// See RFC 4880, section 5.2.2.
type SignatureV3 struct {
SigType SignatureType
CreationTime time.Time
IssuerKeyId uint64
PubKeyAlgo PublicKeyAlgorithm
Hash crypto.Hash
HashTag [2]byte
RSASignature parsedMPI
DSASigR, DSASigS parsedMPI
}
func (sig *SignatureV3) parse(r io.Reader) (err error) {
// RFC 4880, section 5.2.2
var buf [8]byte
if _, err = readFull(r, buf[:1]); err != nil {
return
}
if buf[0] < 2 || buf[0] > 3 {
err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0])))
return
}
if _, err = readFull(r, buf[:1]); err != nil {
return
}
if buf[0] != 5 {
err = errors.UnsupportedError(
"invalid hashed material length " + strconv.Itoa(int(buf[0])))
return
}
// Read hashed material: signature type + creation time
if _, err = readFull(r, buf[:5]); err != nil {
return
}
sig.SigType = SignatureType(buf[0])
t := binary.BigEndian.Uint32(buf[1:5])
sig.CreationTime = time.Unix(int64(t), 0)
// Eight-octet Key ID of signer.
if _, err = readFull(r, buf[:8]); err != nil {
return
}
sig.IssuerKeyId = binary.BigEndian.Uint64(buf[:])
// Public-key and hash algorithm
if _, err = readFull(r, buf[:2]); err != nil {
return
}
sig.PubKeyAlgo = PublicKeyAlgorithm(buf[0])
switch sig.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA:
default:
err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
return
}
var ok bool
if sig.Hash, ok = s2k.HashIdToHash(buf[1]); !ok {
return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[1])))
}
// Two-octet field holding left 16 bits of signed hash value.
if _, err = readFull(r, sig.HashTag[:2]); err != nil {
return
}
switch sig.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r)
case PubKeyAlgoDSA:
if sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r); err != nil {
return
}
sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r)
default:
panic("unreachable")
}
return
}
// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been
// called first.
func (sig *SignatureV3) Serialize(w io.Writer) (err error) {
buf := make([]byte, 8)
// Write the sig type and creation time
buf[0] = byte(sig.SigType)
binary.BigEndian.PutUint32(buf[1:5], uint32(sig.CreationTime.Unix()))
if _, err = w.Write(buf[:5]); err != nil {
return
}
// Write the issuer long key ID
binary.BigEndian.PutUint64(buf[:8], sig.IssuerKeyId)
if _, err = w.Write(buf[:8]); err != nil {
return
}
// Write public key algorithm, hash ID, and hash value
buf[0] = byte(sig.PubKeyAlgo)
hashId, ok := s2k.HashToHashId(sig.Hash)
if !ok {
return errors.UnsupportedError(fmt.Sprintf("hash function %v", sig.Hash))
}
buf[1] = hashId
copy(buf[2:4], sig.HashTag[:])
if _, err = w.Write(buf[:4]); err != nil {
return
}
if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil {
return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize")
}
switch sig.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
err = writeMPIs(w, sig.RSASignature)
case PubKeyAlgoDSA:
err = writeMPIs(w, sig.DSASigR, sig.DSASigS)
default:
panic("impossible")
}
return
}

View File

@ -0,0 +1,155 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"bytes"
"crypto/cipher"
"io"
"strconv"
"golang.org/x/crypto/openpgp/errors"
"golang.org/x/crypto/openpgp/s2k"
)
// This is the largest session key that we'll support. Since no 512-bit cipher
// has ever been seriously used, this is comfortably large.
const maxSessionKeySizeInBytes = 64
// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC
// 4880, section 5.3.
type SymmetricKeyEncrypted struct {
CipherFunc CipherFunction
s2k func(out, in []byte)
encryptedKey []byte
}
const symmetricKeyEncryptedVersion = 4
func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
// RFC 4880, section 5.3.
var buf [2]byte
if _, err := readFull(r, buf[:]); err != nil {
return err
}
if buf[0] != symmetricKeyEncryptedVersion {
return errors.UnsupportedError("SymmetricKeyEncrypted version")
}
ske.CipherFunc = CipherFunction(buf[1])
if ske.CipherFunc.KeySize() == 0 {
return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1])))
}
var err error
ske.s2k, err = s2k.Parse(r)
if err != nil {
return err
}
encryptedKey := make([]byte, maxSessionKeySizeInBytes)
// The session key may follow. We just have to try and read to find
// out. If it exists then we limit it to maxSessionKeySizeInBytes.
n, err := readFull(r, encryptedKey)
if err != nil && err != io.ErrUnexpectedEOF {
return err
}
if n != 0 {
if n == maxSessionKeySizeInBytes {
return errors.UnsupportedError("oversized encrypted session key")
}
ske.encryptedKey = encryptedKey[:n]
}
return nil
}
// Decrypt attempts to decrypt an encrypted session key and returns the key and
// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data
// packet.
func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) {
key := make([]byte, ske.CipherFunc.KeySize())
ske.s2k(key, passphrase)
if len(ske.encryptedKey) == 0 {
return key, ske.CipherFunc, nil
}
// the IV is all zeros
iv := make([]byte, ske.CipherFunc.blockSize())
c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv)
plaintextKey := make([]byte, len(ske.encryptedKey))
c.XORKeyStream(plaintextKey, ske.encryptedKey)
cipherFunc := CipherFunction(plaintextKey[0])
if cipherFunc.blockSize() == 0 {
return nil, ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc)))
}
plaintextKey = plaintextKey[1:]
if l, cipherKeySize := len(plaintextKey), cipherFunc.KeySize(); l != cipherKeySize {
return nil, cipherFunc, errors.StructuralError("length of decrypted key (" + strconv.Itoa(l) + ") " +
"not equal to cipher keysize (" + strconv.Itoa(cipherKeySize) + ")")
}
return plaintextKey, cipherFunc, nil
}
// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. The
// packet contains a random session key, encrypted by a key derived from the
// given passphrase. The session key is returned and must be passed to
// SerializeSymmetricallyEncrypted.
// If config is nil, sensible defaults will be used.
func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) {
cipherFunc := config.Cipher()
keySize := cipherFunc.KeySize()
if keySize == 0 {
return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc)))
}
s2kBuf := new(bytes.Buffer)
keyEncryptingKey := make([]byte, keySize)
// s2k.Serialize salts and stretches the passphrase, and writes the
// resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf.
err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()})
if err != nil {
return
}
s2kBytes := s2kBuf.Bytes()
packetLength := 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize
err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength)
if err != nil {
return
}
var buf [2]byte
buf[0] = symmetricKeyEncryptedVersion
buf[1] = byte(cipherFunc)
_, err = w.Write(buf[:])
if err != nil {
return
}
_, err = w.Write(s2kBytes)
if err != nil {
return
}
sessionKey := make([]byte, keySize)
_, err = io.ReadFull(config.Random(), sessionKey)
if err != nil {
return
}
iv := make([]byte, cipherFunc.blockSize())
c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv)
encryptedCipherAndKey := make([]byte, keySize+1)
c.XORKeyStream(encryptedCipherAndKey, buf[1:])
c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey)
_, err = w.Write(encryptedCipherAndKey)
if err != nil {
return
}
key = sessionKey
return
}
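// exampleSerializeSymmetricKeyEncrypted is an illustrative sketch added by the
// editor, not part of the upstream file: it shows how the session key returned
// by SerializeSymmetricKeyEncrypted is meant to be fed into
// SerializeSymmetricallyEncrypted (defined in symmetrically_encrypted.go).
// With a nil *Config both calls default to CipherAES128, so the cipher passed
// below has to match that default.
func exampleSerializeSymmetricKeyEncrypted(out *bytes.Buffer, passphrase, plaintext []byte) error {
	// Write the passphrase-protected session key packet and keep the key.
	key, err := SerializeSymmetricKeyEncrypted(out, passphrase, nil)
	if err != nil {
		return err
	}
	// Open the symmetrically encrypted data packet with the same key and cipher.
	contents, err := SerializeSymmetricallyEncrypted(out, CipherAES128, key, nil)
	if err != nil {
		return err
	}
	if _, err := contents.Write(plaintext); err != nil {
		return err
	}
	// Close finishes the stream, including the trailing MDC packet.
	return contents.Close()
}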

View File

@ -0,0 +1,290 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"crypto/cipher"
"crypto/sha1"
"crypto/subtle"
"golang.org/x/crypto/openpgp/errors"
"hash"
"io"
"strconv"
)
// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The
// encrypted contents will consist of more OpenPGP packets. See RFC 4880,
// sections 5.7 and 5.13.
type SymmetricallyEncrypted struct {
MDC bool // true iff this is a type 18 packet and thus has an embedded MAC.
contents io.Reader
prefix []byte
}
const symmetricallyEncryptedVersion = 1
func (se *SymmetricallyEncrypted) parse(r io.Reader) error {
if se.MDC {
// See RFC 4880, section 5.13.
var buf [1]byte
_, err := readFull(r, buf[:])
if err != nil {
return err
}
if buf[0] != symmetricallyEncryptedVersion {
return errors.UnsupportedError("unknown SymmetricallyEncrypted version")
}
}
se.contents = r
return nil
}
// Decrypt returns a ReadCloser, from which the decrypted contents of the
// packet can be read. An incorrect key can, with high probability, be detected
// immediately and this will result in a KeyIncorrect error being returned.
func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) {
keySize := c.KeySize()
if keySize == 0 {
return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c)))
}
if len(key) != keySize {
return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length")
}
if se.prefix == nil {
se.prefix = make([]byte, c.blockSize()+2)
_, err := readFull(se.contents, se.prefix)
if err != nil {
return nil, err
}
} else if len(se.prefix) != c.blockSize()+2 {
return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths")
}
ocfbResync := OCFBResync
if se.MDC {
// MDC packets use a different form of OCFB mode.
ocfbResync = OCFBNoResync
}
s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync)
if s == nil {
return nil, errors.ErrKeyIncorrect
}
plaintext := cipher.StreamReader{S: s, R: se.contents}
if se.MDC {
// MDC packets have an embedded hash that we need to check.
h := sha1.New()
h.Write(se.prefix)
return &seMDCReader{in: plaintext, h: h}, nil
}
// Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser.
return seReader{plaintext}, nil
}
// seReader wraps an io.Reader with a no-op Close method.
type seReader struct {
in io.Reader
}
func (ser seReader) Read(buf []byte) (int, error) {
return ser.in.Read(buf)
}
func (ser seReader) Close() error {
return nil
}
const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size
// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold
// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an
// MDC packet containing a hash of the previous contents which is checked
// against the running hash. See RFC 4880, section 5.13.
type seMDCReader struct {
in io.Reader
h hash.Hash
trailer [mdcTrailerSize]byte
scratch [mdcTrailerSize]byte
trailerUsed int
error bool
eof bool
}
func (ser *seMDCReader) Read(buf []byte) (n int, err error) {
if ser.error {
err = io.ErrUnexpectedEOF
return
}
if ser.eof {
err = io.EOF
return
}
// If we haven't yet filled the trailer buffer then we must do that
// first.
for ser.trailerUsed < mdcTrailerSize {
n, err = ser.in.Read(ser.trailer[ser.trailerUsed:])
ser.trailerUsed += n
if err == io.EOF {
if ser.trailerUsed != mdcTrailerSize {
n = 0
err = io.ErrUnexpectedEOF
ser.error = true
return
}
ser.eof = true
n = 0
return
}
if err != nil {
n = 0
return
}
}
// If it's a short read then we read into a temporary buffer and shift
// the data into the caller's buffer.
if len(buf) <= mdcTrailerSize {
n, err = readFull(ser.in, ser.scratch[:len(buf)])
copy(buf, ser.trailer[:n])
ser.h.Write(buf[:n])
copy(ser.trailer[:], ser.trailer[n:])
copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:])
if n < len(buf) {
ser.eof = true
err = io.EOF
}
return
}
n, err = ser.in.Read(buf[mdcTrailerSize:])
copy(buf, ser.trailer[:])
ser.h.Write(buf[:n])
copy(ser.trailer[:], buf[n:])
if err == io.EOF {
ser.eof = true
}
return
}
// This is a new-format packet tag byte for a type 19 (MDC) packet.
const mdcPacketTagByte = byte(0x80) | 0x40 | 19
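// (Editor's note: 0x80|0x40|19 == 0xd3, i.e. a new-format packet header byte
// whose tag bits encode packet type 19.)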
func (ser *seMDCReader) Close() error {
if ser.error {
return errors.SignatureError("error during reading")
}
for !ser.eof {
// We haven't seen EOF so we need to read to the end
var buf [1024]byte
_, err := ser.Read(buf[:])
if err == io.EOF {
break
}
if err != nil {
return errors.SignatureError("error during reading")
}
}
if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size {
return errors.SignatureError("MDC packet not found")
}
ser.h.Write(ser.trailer[:2])
final := ser.h.Sum(nil)
if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 {
return errors.SignatureError("hash mismatch")
}
return nil
}
// An seMDCWriter writes through to an io.WriteCloser while maintaining a running
// hash of the data written. On close, it emits an MDC packet containing the
// running hash.
type seMDCWriter struct {
w io.WriteCloser
h hash.Hash
}
func (w *seMDCWriter) Write(buf []byte) (n int, err error) {
w.h.Write(buf)
return w.w.Write(buf)
}
func (w *seMDCWriter) Close() (err error) {
var buf [mdcTrailerSize]byte
buf[0] = mdcPacketTagByte
buf[1] = sha1.Size
w.h.Write(buf[:2])
digest := w.h.Sum(nil)
copy(buf[2:], digest)
_, err = w.w.Write(buf[:])
if err != nil {
return
}
return w.w.Close()
}
// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
type noOpCloser struct {
w io.Writer
}
func (c noOpCloser) Write(data []byte) (n int, err error) {
return c.w.Write(data)
}
func (c noOpCloser) Close() error {
return nil
}
// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet
// to w and returns a WriteCloser to which the to-be-encrypted packets can be
// written.
// If config is nil, sensible defaults will be used.
func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (contents io.WriteCloser, err error) {
if c.KeySize() != len(key) {
return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length")
}
writeCloser := noOpCloser{w}
ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC)
if err != nil {
return
}
_, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion})
if err != nil {
return
}
block := c.new(key)
blockSize := block.BlockSize()
iv := make([]byte, blockSize)
_, err = config.Random().Read(iv)
if err != nil {
return
}
s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync)
_, err = ciphertext.Write(prefix)
if err != nil {
return
}
plaintext := cipher.StreamWriter{S: s, W: ciphertext}
h := sha1.New()
h.Write(iv)
h.Write(iv[blockSize-2:])
contents = &seMDCWriter{w: plaintext, h: h}
return
}

View File

@ -0,0 +1,91 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"bytes"
"image"
"image/jpeg"
"io"
"io/ioutil"
)
const UserAttrImageSubpacket = 1
// UserAttribute is capable of storing other types of data about a user
// beyond name, email and a text comment. In practice, user attributes are typically used
// to store a signed thumbnail photo JPEG image of the user.
// See RFC 4880, section 5.12.
type UserAttribute struct {
Contents []*OpaqueSubpacket
}
// NewUserAttributePhoto creates a user attribute packet
// containing the given images.
func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) {
uat = new(UserAttribute)
for _, photo := range photos {
var buf bytes.Buffer
// RFC 4880, Section 5.12.1.
data := []byte{
0x10, 0x00, // Little-endian image header length (16 bytes)
0x01, // Image header version 1
0x01, // JPEG
0, 0, 0, 0, // 12 reserved octets, must be all zero.
0, 0, 0, 0,
0, 0, 0, 0}
if _, err = buf.Write(data); err != nil {
return
}
if err = jpeg.Encode(&buf, photo, nil); err != nil {
return
}
uat.Contents = append(uat.Contents, &OpaqueSubpacket{
SubType: UserAttrImageSubpacket,
Contents: buf.Bytes()})
}
return
}
// NewUserAttribute creates a new user attribute packet containing the given subpackets.
func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute {
return &UserAttribute{Contents: contents}
}
func (uat *UserAttribute) parse(r io.Reader) (err error) {
// RFC 4880, section 5.12
b, err := ioutil.ReadAll(r)
if err != nil {
return
}
uat.Contents, err = OpaqueSubpackets(b)
return
}
// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including
// header.
func (uat *UserAttribute) Serialize(w io.Writer) (err error) {
var buf bytes.Buffer
for _, sp := range uat.Contents {
sp.Serialize(&buf)
}
if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil {
return err
}
_, err = w.Write(buf.Bytes())
return
}
// ImageData returns zero or more byte slices, each containing
// JPEG File Interchange Format (JFIF), for each photo in the
// user attribute packet.
func (uat *UserAttribute) ImageData() (imageData [][]byte) {
for _, sp := range uat.Contents {
if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 {
imageData = append(imageData, sp.Contents[16:])
}
}
return
}
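// exampleUserAttributePhoto is an illustrative sketch added by the editor, not
// part of the upstream file: it builds a photo user attribute from a tiny
// generated image and reads the embedded JPEG bytes back via ImageData, which
// skips the 16-byte image header written by NewUserAttributePhoto.
func exampleUserAttributePhoto() ([]byte, error) {
	img := image.NewRGBA(image.Rect(0, 0, 8, 8))
	uat, err := NewUserAttributePhoto(img)
	if err != nil {
		return nil, err
	}
	// One photo was supplied, so ImageData returns exactly one JFIF blob.
	return uat.ImageData()[0], nil
}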

160
vendor/golang.org/x/crypto/openpgp/packet/userid.go generated vendored Normal file
View File

@ -0,0 +1,160 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"io"
"io/ioutil"
"strings"
)
// UserId contains text that is intended to represent the name and email
// address of the key holder. See RFC 4880, section 5.11. By convention, this
// takes the form "Full Name (Comment) <email@example.com>"
type UserId struct {
Id string // By convention, this takes the form "Full Name (Comment) <email@example.com>" which is split out in the fields below.
Name, Comment, Email string
}
func hasInvalidCharacters(s string) bool {
for _, c := range s {
switch c {
case '(', ')', '<', '>', 0:
return true
}
}
return false
}
// NewUserId returns a UserId or nil if any of the arguments contain invalid
// characters. The invalid characters are '\x00', '(', ')', '<' and '>'
func NewUserId(name, comment, email string) *UserId {
// RFC 4880 doesn't deal with the structure of userid strings; the
// name, comment and email form is just a convention. However, there's
// no convention about escaping the metacharacters and GPG just refuses
// to create user ids where, say, the name contains a '('. We mirror
// this behaviour.
if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) {
return nil
}
uid := new(UserId)
uid.Name, uid.Comment, uid.Email = name, comment, email
uid.Id = name
if len(comment) > 0 {
if len(uid.Id) > 0 {
uid.Id += " "
}
uid.Id += "("
uid.Id += comment
uid.Id += ")"
}
if len(email) > 0 {
if len(uid.Id) > 0 {
uid.Id += " "
}
uid.Id += "<"
uid.Id += email
uid.Id += ">"
}
return uid
}
func (uid *UserId) parse(r io.Reader) (err error) {
// RFC 4880, section 5.11
b, err := ioutil.ReadAll(r)
if err != nil {
return
}
uid.Id = string(b)
uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id)
return
}
// Serialize marshals uid to w in the form of an OpenPGP packet, including
// header.
func (uid *UserId) Serialize(w io.Writer) error {
err := serializeHeader(w, packetTypeUserId, len(uid.Id))
if err != nil {
return err
}
_, err = w.Write([]byte(uid.Id))
return err
}
// parseUserId extracts the name, comment and email from a user id string that
// is formatted as "Full Name (Comment) <email@example.com>".
func parseUserId(id string) (name, comment, email string) {
var n, c, e struct {
start, end int
}
var state int
for offset, rune := range id {
switch state {
case 0:
// Entering name
n.start = offset
state = 1
fallthrough
case 1:
// In name
if rune == '(' {
state = 2
n.end = offset
} else if rune == '<' {
state = 5
n.end = offset
}
case 2:
// Entering comment
c.start = offset
state = 3
fallthrough
case 3:
// In comment
if rune == ')' {
state = 4
c.end = offset
}
case 4:
// Between comment and email
if rune == '<' {
state = 5
}
case 5:
// Entering email
e.start = offset
state = 6
fallthrough
case 6:
// In email
if rune == '>' {
state = 7
e.end = offset
}
default:
// After email
}
}
switch state {
case 1:
// ended in the name
n.end = len(id)
case 3:
// ended in comment
c.end = len(id)
case 6:
// ended in email
e.end = len(id)
}
name = strings.TrimSpace(id[n.start:n.end])
comment = strings.TrimSpace(id[c.start:c.end])
email = strings.TrimSpace(id[e.start:e.end])
return
}
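// exampleUserIdRoundTrip is an illustrative sketch added by the editor, not
// part of the upstream file: NewUserId assembles the conventional
// "Full Name (Comment) <email>" form and parseUserId splits it back apart.
func exampleUserIdRoundTrip() (name, comment, email string) {
	uid := NewUserId("Alice Example", "work", "alice@example.com")
	// uid.Id is now "Alice Example (work) <alice@example.com>".
	return parseUserId(uid.Id)
}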

442
vendor/golang.org/x/crypto/openpgp/read.go generated vendored Normal file
View File

@ -0,0 +1,442 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package openpgp implements high level operations on OpenPGP messages.
package openpgp // import "golang.org/x/crypto/openpgp"
import (
"crypto"
_ "crypto/sha256"
"hash"
"io"
"strconv"
"golang.org/x/crypto/openpgp/armor"
"golang.org/x/crypto/openpgp/errors"
"golang.org/x/crypto/openpgp/packet"
)
// SignatureType is the armor type for a PGP signature.
var SignatureType = "PGP SIGNATURE"
// readArmored reads an armored block with the given type.
func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) {
block, err := armor.Decode(r)
if err != nil {
return
}
if block.Type != expectedType {
return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type)
}
return block.Body, nil
}
// MessageDetails contains the result of parsing an OpenPGP encrypted and/or
// signed message.
type MessageDetails struct {
IsEncrypted bool // true if the message was encrypted.
EncryptedToKeyIds []uint64 // the list of recipient key ids.
IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message.
DecryptedWith Key // the private key used to decrypt the message, if any.
IsSigned bool // true if the message is signed.
SignedByKeyId uint64 // the key id of the signer, if any.
SignedBy *Key // the key of the signer, if available.
LiteralData *packet.LiteralData // the metadata of the contents
UnverifiedBody io.Reader // the contents of the message.
// If IsSigned is true and SignedBy is non-nil then the signature will
// be verified as UnverifiedBody is read. The signature cannot be
// checked until the whole of UnverifiedBody is read so UnverifiedBody
// must be consumed until EOF before the data can be trusted. Even if a
// message isn't signed (or the signer is unknown) the data may contain
// an authentication code that is only checked once UnverifiedBody has
// been consumed. Once EOF has been seen, the following fields are
// valid. (An authentication code failure is reported as a
// SignatureError error when reading from UnverifiedBody.)
SignatureError error // nil if the signature is good.
Signature *packet.Signature // the signature packet itself, if v4 (default)
SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature
decrypted io.ReadCloser
}
// A PromptFunction is used as a callback by functions that may need to decrypt
// a private key, or prompt for a passphrase. It is called with a list of
// acceptable, encrypted private keys and a boolean that indicates whether a
// passphrase is usable. It should either decrypt a private key or return a
// passphrase to try. If the decrypted private key or given passphrase isn't
// correct, the function will be called again, forever. Any error returned will
// be passed up.
type PromptFunction func(keys []Key, symmetric bool) ([]byte, error)
// A keyEnvelopePair is used to store a private key with the envelope that
// contains a symmetric key, encrypted with that key.
type keyEnvelopePair struct {
key Key
encryptedKey *packet.EncryptedKey
}
// ReadMessage parses an OpenPGP message that may be signed and/or encrypted.
// The given KeyRing should contain both public keys (for signature
// verification) and, possibly encrypted, private keys for decrypting.
// If config is nil, sensible defaults will be used.
func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) {
var p packet.Packet
var symKeys []*packet.SymmetricKeyEncrypted
var pubKeys []keyEnvelopePair
var se *packet.SymmetricallyEncrypted
packets := packet.NewReader(r)
md = new(MessageDetails)
md.IsEncrypted = true
// The message, if encrypted, starts with a number of packets
// containing an encrypted decryption key. The decryption key is either
// encrypted to a public key, or with a passphrase. This loop
// collects these packets.
ParsePackets:
for {
p, err = packets.Next()
if err != nil {
return nil, err
}
switch p := p.(type) {
case *packet.SymmetricKeyEncrypted:
// This packet contains the decryption key encrypted with a passphrase.
md.IsSymmetricallyEncrypted = true
symKeys = append(symKeys, p)
case *packet.EncryptedKey:
// This packet contains the decryption key encrypted to a public key.
md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId)
switch p.Algo {
case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal:
break
default:
continue
}
var keys []Key
if p.KeyId == 0 {
keys = keyring.DecryptionKeys()
} else {
keys = keyring.KeysById(p.KeyId)
}
for _, k := range keys {
pubKeys = append(pubKeys, keyEnvelopePair{k, p})
}
case *packet.SymmetricallyEncrypted:
se = p
break ParsePackets
case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature:
// This message isn't encrypted.
if len(symKeys) != 0 || len(pubKeys) != 0 {
return nil, errors.StructuralError("key material not followed by encrypted message")
}
packets.Unread(p)
return readSignedMessage(packets, nil, keyring)
}
}
var candidates []Key
var decrypted io.ReadCloser
// Now that we have the list of encrypted keys we need to decrypt at
// least one of them or, if we cannot, we need to call the prompt
// function so that it can decrypt a key or give us a passphrase.
FindKey:
for {
// See if any of the keys already have a private key available
candidates = candidates[:0]
candidateFingerprints := make(map[string]bool)
for _, pk := range pubKeys {
if pk.key.PrivateKey == nil {
continue
}
if !pk.key.PrivateKey.Encrypted {
if len(pk.encryptedKey.Key) == 0 {
pk.encryptedKey.Decrypt(pk.key.PrivateKey, config)
}
if len(pk.encryptedKey.Key) == 0 {
continue
}
decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key)
if err != nil && err != errors.ErrKeyIncorrect {
return nil, err
}
if decrypted != nil {
md.DecryptedWith = pk.key
break FindKey
}
} else {
fpr := string(pk.key.PublicKey.Fingerprint[:])
if v := candidateFingerprints[fpr]; v {
continue
}
candidates = append(candidates, pk.key)
candidateFingerprints[fpr] = true
}
}
if len(candidates) == 0 && len(symKeys) == 0 {
return nil, errors.ErrKeyIncorrect
}
if prompt == nil {
return nil, errors.ErrKeyIncorrect
}
passphrase, err := prompt(candidates, len(symKeys) != 0)
if err != nil {
return nil, err
}
// Try the symmetric passphrase first
if len(symKeys) != 0 && passphrase != nil {
for _, s := range symKeys {
key, cipherFunc, err := s.Decrypt(passphrase)
if err == nil {
decrypted, err = se.Decrypt(cipherFunc, key)
if err != nil && err != errors.ErrKeyIncorrect {
return nil, err
}
if decrypted != nil {
break FindKey
}
}
}
}
}
md.decrypted = decrypted
if err := packets.Push(decrypted); err != nil {
return nil, err
}
return readSignedMessage(packets, md, keyring)
}
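// exampleSymmetricRoundTrip is an illustrative sketch added by the editor, not
// part of the upstream file: it encrypts plaintext with a passphrase via
// SymmetricallyEncrypt (defined in write.go) and decrypts it again with
// ReadMessage, supplying the passphrase through the PromptFunction. A caller
// would typically pass a *bytes.Buffer as buf.
func exampleSymmetricRoundTrip(buf io.ReadWriter, passphrase, plaintext []byte) (io.Reader, error) {
	pt, err := SymmetricallyEncrypt(buf, passphrase, nil, nil)
	if err != nil {
		return nil, err
	}
	if _, err := pt.Write(plaintext); err != nil {
		return nil, err
	}
	if err := pt.Close(); err != nil {
		return nil, err
	}
	prompt := func(keys []Key, symmetric bool) ([]byte, error) {
		return passphrase, nil
	}
	md, err := ReadMessage(buf, nil, prompt, nil)
	if err != nil {
		return nil, err
	}
	// The returned body must be read to EOF before the MDC check completes;
	// see the MessageDetails documentation above.
	return md.UnverifiedBody, nil
}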
// readSignedMessage reads a possibly signed message. If mdin is non-nil then
// that structure is updated and returned. Otherwise a fresh MessageDetails is
// used.
func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) {
if mdin == nil {
mdin = new(MessageDetails)
}
md = mdin
var p packet.Packet
var h hash.Hash
var wrappedHash hash.Hash
FindLiteralData:
for {
p, err = packets.Next()
if err != nil {
return nil, err
}
switch p := p.(type) {
case *packet.Compressed:
if err := packets.Push(p.Body); err != nil {
return nil, err
}
case *packet.OnePassSignature:
if !p.IsLast {
return nil, errors.UnsupportedError("nested signatures")
}
h, wrappedHash, err = hashForSignature(p.Hash, p.SigType)
if err != nil {
md = nil
return
}
md.IsSigned = true
md.SignedByKeyId = p.KeyId
keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign)
if len(keys) > 0 {
md.SignedBy = &keys[0]
}
case *packet.LiteralData:
md.LiteralData = p
break FindLiteralData
}
}
if md.SignedBy != nil {
md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md}
} else if md.decrypted != nil {
md.UnverifiedBody = checkReader{md}
} else {
md.UnverifiedBody = md.LiteralData.Body
}
return md, nil
}
// hashForSignature returns a pair of hashes that can be used to verify a
// signature. The signature may specify that the contents of the signed message
// should be preprocessed (i.e. to normalize line endings). Thus this function
// returns two hashes. The second should be used to hash the message itself and
// performs any needed preprocessing.
func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) {
if !hashId.Available() {
return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId)))
}
h := hashId.New()
switch sigType {
case packet.SigTypeBinary:
return h, h, nil
case packet.SigTypeText:
return h, NewCanonicalTextHash(h), nil
}
return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType)))
}
// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF
// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger
// MDC checks.
type checkReader struct {
md *MessageDetails
}
func (cr checkReader) Read(buf []byte) (n int, err error) {
n, err = cr.md.LiteralData.Body.Read(buf)
if err == io.EOF {
mdcErr := cr.md.decrypted.Close()
if mdcErr != nil {
err = mdcErr
}
}
return
}
// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes
// the data as it is read. When it sees an EOF from the underlying io.Reader
// it parses and checks a trailing Signature packet and triggers any MDC checks.
type signatureCheckReader struct {
packets *packet.Reader
h, wrappedHash hash.Hash
md *MessageDetails
}
func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) {
n, err = scr.md.LiteralData.Body.Read(buf)
scr.wrappedHash.Write(buf[:n])
if err == io.EOF {
var p packet.Packet
p, scr.md.SignatureError = scr.packets.Next()
if scr.md.SignatureError != nil {
return
}
var ok bool
if scr.md.Signature, ok = p.(*packet.Signature); ok {
scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature)
} else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok {
scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3)
} else {
scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature")
return
}
// The SymmetricallyEncrypted packet, if any, might have an
// unsigned hash of its own. In order to check this we need to
// close that Reader.
if scr.md.decrypted != nil {
mdcErr := scr.md.decrypted.Close()
if mdcErr != nil {
err = mdcErr
}
}
}
return
}
// CheckDetachedSignature takes a signed file and a detached signature and
// returns the signer if the signature is valid. If the signer isn't known,
// ErrUnknownIssuer is returned.
func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) {
var issuerKeyId uint64
var hashFunc crypto.Hash
var sigType packet.SignatureType
var keys []Key
var p packet.Packet
packets := packet.NewReader(signature)
for {
p, err = packets.Next()
if err == io.EOF {
return nil, errors.ErrUnknownIssuer
}
if err != nil {
return nil, err
}
switch sig := p.(type) {
case *packet.Signature:
if sig.IssuerKeyId == nil {
return nil, errors.StructuralError("signature doesn't have an issuer")
}
issuerKeyId = *sig.IssuerKeyId
hashFunc = sig.Hash
sigType = sig.SigType
case *packet.SignatureV3:
issuerKeyId = sig.IssuerKeyId
hashFunc = sig.Hash
sigType = sig.SigType
default:
return nil, errors.StructuralError("non signature packet found")
}
keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign)
if len(keys) > 0 {
break
}
}
if len(keys) == 0 {
panic("unreachable")
}
h, wrappedHash, err := hashForSignature(hashFunc, sigType)
if err != nil {
return nil, err
}
if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF {
return nil, err
}
for _, key := range keys {
switch sig := p.(type) {
case *packet.Signature:
err = key.PublicKey.VerifySignature(h, sig)
case *packet.SignatureV3:
err = key.PublicKey.VerifySignatureV3(h, sig)
default:
panic("unreachable")
}
if err == nil {
return key.Entity, nil
}
}
return nil, err
}
// CheckArmoredDetachedSignature performs the same actions as
// CheckDetachedSignature but expects the signature to be armored.
func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) {
body, err := readArmored(signature, SignatureType)
if err != nil {
return
}
return CheckDetachedSignature(keyring, signed, body)
}
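// exampleVerifyArmoredDetached is an illustrative sketch added by the editor,
// not part of the upstream file: it verifies an armored detached signature
// against an armored public keyring. ReadArmoredKeyRing and Entity.Identities
// are assumed to come from keys.go in this package.
func exampleVerifyArmoredDetached(pubKeyArmor, signed, sigArmor io.Reader) (string, error) {
	keyring, err := ReadArmoredKeyRing(pubKeyArmor)
	if err != nil {
		return "", err
	}
	signer, err := CheckArmoredDetachedSignature(keyring, signed, sigArmor)
	if err != nil {
		return "", err
	}
	// Report any one of the signer's user ids.
	for name := range signer.Identities {
		return name, nil
	}
	return "", nil
}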

273
vendor/golang.org/x/crypto/openpgp/s2k/s2k.go generated vendored Normal file
View File

@ -0,0 +1,273 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package s2k implements the various OpenPGP string-to-key transforms as
// specified in RFC 4880, section 3.7.1.
package s2k // import "golang.org/x/crypto/openpgp/s2k"
import (
"crypto"
"hash"
"io"
"strconv"
"golang.org/x/crypto/openpgp/errors"
)
// Config collects configuration parameters for s2k key-stretching
// transformations. A nil *Config is valid and results in all default
// values. Currently, Config is used only by the Serialize function in
// this package.
type Config struct {
// Hash is the default hash function to be used. If
// nil, SHA1 is used.
Hash crypto.Hash
// S2KCount is only used for symmetric encryption. It
// determines the strength of the passphrase stretching when
// the said passphrase is hashed to produce a key. S2KCount
// should be between 1024 and 65011712, inclusive. If Config
// is nil or S2KCount is 0, the value 65536 is used. Not all
// values in the above range can be represented. S2KCount will
// be rounded up to the next representable value if it cannot
// be encoded exactly. When set, it is strongly encouraged to
// use a value that is at least 65536. See RFC 4880 Section
// 3.7.1.3.
S2KCount int
}
func (c *Config) hash() crypto.Hash {
if c == nil || uint(c.Hash) == 0 {
// SHA1 is the historical default in this package.
return crypto.SHA1
}
return c.Hash
}
func (c *Config) encodedCount() uint8 {
if c == nil || c.S2KCount == 0 {
return 96 // The common case, corresponding to a count of 65536
}
i := c.S2KCount
switch {
// Behave like GPG. Should we make 65536 the lowest value used?
case i < 1024:
i = 1024
case i > 65011712:
i = 65011712
}
return encodeCount(i)
}
// encodeCount converts an iterative "count" in the range 1024 to
// 65011712, inclusive, to an encoded count. The return value is the
// octet that is actually stored in the GPG file. encodeCount panics
// if i is not in the above range (encodedCount above takes care to
// pass i in the correct range). See RFC 4880, section 3.7.1.3.
func encodeCount(i int) uint8 {
if i < 1024 || i > 65011712 {
panic("count arg i outside the required range")
}
for encoded := 0; encoded < 256; encoded++ {
count := decodeCount(uint8(encoded))
if count >= i {
return uint8(encoded)
}
}
return 255
}
// decodeCount returns the s2k mode 3 iterative "count" corresponding to
// the encoded octet c.
func decodeCount(c uint8) int {
return (16 + int(c&15)) << (uint32(c>>4) + 6)
}
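// Worked example (editor's note): for the default octet 96 the low nibble is 0
// and the high nibble is 6, so decodeCount(96) == (16+0) << (6+6) ==
// 16 << 12 == 65536, matching the default described in Config above.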
// Simple writes to out the result of computing the Simple S2K function (RFC
// 4880, section 3.7.1.1) using the given hash and input passphrase.
func Simple(out []byte, h hash.Hash, in []byte) {
Salted(out, h, in, nil)
}
var zero [1]byte
// Salted writes to out the result of computing the Salted S2K function (RFC
// 4880, section 3.7.1.2) using the given hash, input passphrase and salt.
func Salted(out []byte, h hash.Hash, in []byte, salt []byte) {
done := 0
var digest []byte
for i := 0; done < len(out); i++ {
h.Reset()
for j := 0; j < i; j++ {
h.Write(zero[:])
}
h.Write(salt)
h.Write(in)
digest = h.Sum(digest[:0])
n := copy(out[done:], digest)
done += n
}
}
// Iterated writes to out the result of computing the Iterated and Salted S2K
// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase,
// salt and iteration count.
func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) {
combined := make([]byte, len(in)+len(salt))
copy(combined, salt)
copy(combined[len(salt):], in)
if count < len(combined) {
count = len(combined)
}
done := 0
var digest []byte
for i := 0; done < len(out); i++ {
h.Reset()
for j := 0; j < i; j++ {
h.Write(zero[:])
}
written := 0
for written < count {
if written+len(combined) > count {
todo := count - written
h.Write(combined[:todo])
written = count
} else {
h.Write(combined)
written += len(combined)
}
}
digest = h.Sum(digest[:0])
n := copy(out[done:], digest)
done += n
}
}
// Parse reads a binary specification for a string-to-key transformation from r
// and returns a function which performs that transform.
func Parse(r io.Reader) (f func(out, in []byte), err error) {
var buf [9]byte
_, err = io.ReadFull(r, buf[:2])
if err != nil {
return
}
hash, ok := HashIdToHash(buf[1])
if !ok {
return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1])))
}
if !hash.Available() {
return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hash)))
}
h := hash.New()
switch buf[0] {
case 0:
f := func(out, in []byte) {
Simple(out, h, in)
}
return f, nil
case 1:
_, err = io.ReadFull(r, buf[:8])
if err != nil {
return
}
f := func(out, in []byte) {
Salted(out, h, in, buf[:8])
}
return f, nil
case 3:
_, err = io.ReadFull(r, buf[:9])
if err != nil {
return
}
count := decodeCount(buf[8])
f := func(out, in []byte) {
Iterated(out, h, in, buf[:8], count)
}
return f, nil
}
return nil, errors.UnsupportedError("S2K function")
}
// Serialize salts and stretches the given passphrase and writes the
// resulting key into key. It also serializes an S2K descriptor to
// w. The key stretching can be configured with c, which may be
// nil. In that case, sensible defaults will be used.
func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error {
var buf [11]byte
buf[0] = 3 /* iterated and salted */
buf[1], _ = HashToHashId(c.hash())
salt := buf[2:10]
if _, err := io.ReadFull(rand, salt); err != nil {
return err
}
encodedCount := c.encodedCount()
count := decodeCount(encodedCount)
buf[10] = encodedCount
if _, err := w.Write(buf[:]); err != nil {
return err
}
Iterated(key, c.hash().New(), passphrase, salt, count)
return nil
}
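// exampleSerializeParseRoundTrip is an illustrative sketch added by the
// editor, not part of the upstream file: Serialize derives a key and writes
// the matching S2K descriptor, then Parse reads that descriptor back and
// re-derives the same key from the same passphrase. A caller would typically
// pass a *bytes.Buffer and crypto/rand.Reader; the default SHA-1 hash must be
// linked into the program (e.g. by importing crypto/sha1 somewhere).
func exampleSerializeParseRoundTrip(buf io.ReadWriter, random io.Reader, passphrase []byte) (derived, rederived []byte, err error) {
	derived = make([]byte, 16)
	if err = Serialize(buf, derived, random, passphrase, nil); err != nil {
		return nil, nil, err
	}
	f, err := Parse(buf)
	if err != nil {
		return nil, nil, err
	}
	rederived = make([]byte, 16)
	f(rederived, passphrase)
	// derived and rederived now hold identical bytes.
	return derived, rederived, nil
}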
// hashToHashIdMapping contains pairs relating OpenPGP's hash identifier with
// Go's crypto.Hash type. See RFC 4880, section 9.4.
var hashToHashIdMapping = []struct {
id byte
hash crypto.Hash
name string
}{
{1, crypto.MD5, "MD5"},
{2, crypto.SHA1, "SHA1"},
{3, crypto.RIPEMD160, "RIPEMD160"},
{8, crypto.SHA256, "SHA256"},
{9, crypto.SHA384, "SHA384"},
{10, crypto.SHA512, "SHA512"},
{11, crypto.SHA224, "SHA224"},
}
// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP
// hash id.
func HashIdToHash(id byte) (h crypto.Hash, ok bool) {
for _, m := range hashToHashIdMapping {
if m.id == id {
return m.hash, true
}
}
return 0, false
}
// HashIdToString returns the name of the hash function corresponding to the
// given OpenPGP hash id.
func HashIdToString(id byte) (name string, ok bool) {
for _, m := range hashToHashIdMapping {
if m.id == id {
return m.name, true
}
}
return "", false
}
// HashToHashId returns an OpenPGP hash id which corresponds to the given Hash.
func HashToHashId(h crypto.Hash) (id byte, ok bool) {
for _, m := range hashToHashIdMapping {
if m.hash == h {
return m.id, true
}
}
return 0, false
}

378
vendor/golang.org/x/crypto/openpgp/write.go generated vendored Normal file
View File

@ -0,0 +1,378 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package openpgp
import (
"crypto"
"hash"
"io"
"strconv"
"time"
"golang.org/x/crypto/openpgp/armor"
"golang.org/x/crypto/openpgp/errors"
"golang.org/x/crypto/openpgp/packet"
"golang.org/x/crypto/openpgp/s2k"
)
// DetachSign signs message with the private key from signer (which must
// already have been decrypted) and writes the signature to w.
// If config is nil, sensible defaults will be used.
func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
return detachSign(w, signer, message, packet.SigTypeBinary, config)
}
// ArmoredDetachSign signs message with the private key from signer (which
// must already have been decrypted) and writes an armored signature to w.
// If config is nil, sensible defaults will be used.
func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) {
return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config)
}
// DetachSignText signs message (after canonicalising the line endings) with
// the private key from signer (which must already have been decrypted) and
// writes the signature to w.
// If config is nil, sensible defaults will be used.
func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
return detachSign(w, signer, message, packet.SigTypeText, config)
}
// ArmoredDetachSignText signs message (after canonicalising the line endings)
// with the private key from signer (which must already have been decrypted)
// and writes an armored signature to w.
// If config is nil, sensible defaults will be used.
func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
return armoredDetachSign(w, signer, message, packet.SigTypeText, config)
}
func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) {
out, err := armor.Encode(w, SignatureType, nil)
if err != nil {
return
}
err = detachSign(out, signer, message, sigType, config)
if err != nil {
return
}
return out.Close()
}
func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) {
if signer.PrivateKey == nil {
return errors.InvalidArgumentError("signing key doesn't have a private key")
}
if signer.PrivateKey.Encrypted {
return errors.InvalidArgumentError("signing key is encrypted")
}
sig := new(packet.Signature)
sig.SigType = sigType
sig.PubKeyAlgo = signer.PrivateKey.PubKeyAlgo
sig.Hash = config.Hash()
sig.CreationTime = config.Now()
sig.IssuerKeyId = &signer.PrivateKey.KeyId
h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType)
if err != nil {
return
}
io.Copy(wrappedHash, message)
err = sig.Sign(h, signer.PrivateKey, config)
if err != nil {
return
}
return sig.Serialize(w)
}
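// exampleArmoredDetachSign is an illustrative sketch added by the editor, not
// part of the upstream file: it produces an armored, detached binary signature
// over message using an already-decrypted signing Entity, suitable for
// verification with CheckArmoredDetachedSignature in read.go.
func exampleArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader) error {
	// A nil config selects the package defaults (SHA-256 and the current time).
	return ArmoredDetachSign(w, signer, message, nil)
}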
// FileHints contains metadata about encrypted files. This metadata is, itself,
// encrypted.
type FileHints struct {
// IsBinary can be set to hint that the contents are binary data.
IsBinary bool
// FileName hints at the name of the file that should be written. It's
// truncated to 255 bytes if longer. It may be empty to suggest that the
// file should not be written to disk. It may be equal to "_CONSOLE" to
// suggest the data should not be written to disk.
FileName string
// ModTime contains the modification time of the file, or the zero time if not applicable.
ModTime time.Time
}
// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase.
// The resulting WriteCloser must be closed after the contents of the file have
// been written.
// If config is nil, sensible defaults will be used.
func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
if hints == nil {
hints = &FileHints{}
}
key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config)
if err != nil {
return
}
w, err := packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config)
if err != nil {
return
}
literaldata := w
if algo := config.Compression(); algo != packet.CompressionNone {
var compConfig *packet.CompressionConfig
if config != nil {
compConfig = config.CompressionConfig
}
literaldata, err = packet.SerializeCompressed(w, algo, compConfig)
if err != nil {
return
}
}
var epochSeconds uint32
if !hints.ModTime.IsZero() {
epochSeconds = uint32(hints.ModTime.Unix())
}
return packet.SerializeLiteral(literaldata, hints.IsBinary, hints.FileName, epochSeconds)
}
// intersectPreferences mutates and returns a prefix of a that contains only
// the values in the intersection of a and b. The order of a is preserved.
func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) {
var j int
for _, v := range a {
for _, v2 := range b {
if v == v2 {
a[j] = v
j++
break
}
}
}
return a[:j]
}
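// Worked example (editor's note): intersectPreferences([]uint8{9, 8, 2},
// []uint8{2, 9}) rewrites the first slice in place and returns []uint8{9, 2},
// keeping the first argument's order as documented above.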
func hashToHashId(h crypto.Hash) uint8 {
v, ok := s2k.HashToHashId(h)
if !ok {
panic("tried to convert unknown hash")
}
return v
}
// Encrypt encrypts a message to a number of recipients and, optionally, signs
// it. hints contains optional information, that is also encrypted, that aids
// the recipients in processing the message. The resulting WriteCloser must
// be closed after the contents of the file have been written.
// If config is nil, sensible defaults will be used.
func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
var signer *packet.PrivateKey
if signed != nil {
signKey, ok := signed.signingKey(config.Now())
if !ok {
return nil, errors.InvalidArgumentError("no valid signing keys")
}
signer = signKey.PrivateKey
if signer == nil {
return nil, errors.InvalidArgumentError("no private key in signing key")
}
if signer.Encrypted {
return nil, errors.InvalidArgumentError("signing key must be decrypted")
}
}
// These are the possible ciphers that we'll use for the message.
candidateCiphers := []uint8{
uint8(packet.CipherAES128),
uint8(packet.CipherAES256),
uint8(packet.CipherCAST5),
}
// These are the possible hash functions that we'll use for the signature.
candidateHashes := []uint8{
hashToHashId(crypto.SHA256),
hashToHashId(crypto.SHA512),
hashToHashId(crypto.SHA1),
hashToHashId(crypto.RIPEMD160),
}
// In the event that a recipient doesn't specify any supported ciphers
// or hash functions, these are the ones that we assume that every
// implementation supports.
defaultCiphers := candidateCiphers[len(candidateCiphers)-1:]
defaultHashes := candidateHashes[len(candidateHashes)-1:]
encryptKeys := make([]Key, len(to))
for i := range to {
var ok bool
encryptKeys[i], ok = to[i].encryptionKey(config.Now())
if !ok {
return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys")
}
sig := to[i].primaryIdentity().SelfSignature
preferredSymmetric := sig.PreferredSymmetric
if len(preferredSymmetric) == 0 {
preferredSymmetric = defaultCiphers
}
preferredHashes := sig.PreferredHash
if len(preferredHashes) == 0 {
preferredHashes = defaultHashes
}
candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric)
candidateHashes = intersectPreferences(candidateHashes, preferredHashes)
}
if len(candidateCiphers) == 0 || len(candidateHashes) == 0 {
return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms")
}
cipher := packet.CipherFunction(candidateCiphers[0])
// If the cipher specified by config is a candidate, we'll use that.
configuredCipher := config.Cipher()
for _, c := range candidateCiphers {
cipherFunc := packet.CipherFunction(c)
if cipherFunc == configuredCipher {
cipher = cipherFunc
break
}
}
var hash crypto.Hash
for _, hashId := range candidateHashes {
if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() {
hash = h
break
}
}
// If the hash specified by config is a candidate, we'll use that.
if configuredHash := config.Hash(); configuredHash.Available() {
for _, hashId := range candidateHashes {
if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash {
hash = h
break
}
}
}
if hash == 0 {
hashId := candidateHashes[0]
name, ok := s2k.HashIdToString(hashId)
if !ok {
name = "#" + strconv.Itoa(int(hashId))
}
return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)")
}
symKey := make([]byte, cipher.KeySize())
if _, err := io.ReadFull(config.Random(), symKey); err != nil {
return nil, err
}
for _, key := range encryptKeys {
if err := packet.SerializeEncryptedKey(ciphertext, key.PublicKey, cipher, symKey, config); err != nil {
return nil, err
}
}
encryptedData, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config)
if err != nil {
return
}
if signer != nil {
ops := &packet.OnePassSignature{
SigType: packet.SigTypeBinary,
Hash: hash,
PubKeyAlgo: signer.PubKeyAlgo,
KeyId: signer.KeyId,
IsLast: true,
}
if err := ops.Serialize(encryptedData); err != nil {
return nil, err
}
}
if hints == nil {
hints = &FileHints{}
}
w := encryptedData
if signer != nil {
// If we need to write a signature packet after the literal
// data then we need to stop literalData from closing
// encryptedData.
w = noOpCloser{encryptedData}
}
var epochSeconds uint32
if !hints.ModTime.IsZero() {
epochSeconds = uint32(hints.ModTime.Unix())
}
literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds)
if err != nil {
return nil, err
}
if signer != nil {
return signatureWriter{encryptedData, literalData, hash, hash.New(), signer, config}, nil
}
return literalData, nil
}
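// exampleEncryptToFreshKey is an illustrative sketch added by the editor, not
// part of the upstream file: it encrypts plaintext to a single, freshly
// generated recipient and leaves the message unsigned. NewEntity is assumed
// to come from keys.go in this package; real callers would normally load
// recipients from an existing keyring instead of generating one.
func exampleEncryptToFreshKey(ciphertext io.Writer, plaintext []byte) error {
	recipient, err := NewEntity("Test Recipient", "", "recipient@example.com", nil)
	if err != nil {
		return err
	}
	pt, err := Encrypt(ciphertext, []*Entity{recipient}, nil, nil, nil)
	if err != nil {
		return err
	}
	if _, err := pt.Write(plaintext); err != nil {
		return err
	}
	// Closing flushes the literal data packet and the encryption layers.
	return pt.Close()
}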
// signatureWriter hashes the contents of a message while passing it along to
// literalData. When closed, it closes literalData, writes a signature packet
// to encryptedData and then also closes encryptedData.
type signatureWriter struct {
encryptedData io.WriteCloser
literalData io.WriteCloser
hashType crypto.Hash
h hash.Hash
signer *packet.PrivateKey
config *packet.Config
}
func (s signatureWriter) Write(data []byte) (int, error) {
s.h.Write(data)
return s.literalData.Write(data)
}
func (s signatureWriter) Close() error {
sig := &packet.Signature{
SigType: packet.SigTypeBinary,
PubKeyAlgo: s.signer.PubKeyAlgo,
Hash: s.hashType,
CreationTime: s.config.Now(),
IssuerKeyId: &s.signer.KeyId,
}
if err := sig.Sign(s.h, s.signer, s.config); err != nil {
return err
}
if err := s.literalData.Close(); err != nil {
return err
}
if err := sig.Serialize(s.encryptedData); err != nil {
return err
}
return s.encryptedData.Close()
}
// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
// TODO: we have two of these in OpenPGP packages alone. This probably needs
// to be promoted somewhere more common.
type noOpCloser struct {
w io.Writer
}
func (c noOpCloser) Write(data []byte) (n int, err error) {
return c.w.Write(data)
}
func (c noOpCloser) Close() error {
return nil
}

683
vendor/golang.org/x/crypto/ssh/agent/client.go generated vendored Normal file
View File

@ -0,0 +1,683 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package agent implements the ssh-agent protocol, and provides both
// a client and a server. The client can talk to a standard ssh-agent
// that uses UNIX sockets, and one could implement an alternative
// ssh-agent process using the sample server.
//
// References:
// [PROTOCOL.agent]: https://tools.ietf.org/html/draft-miller-ssh-agent-00
package agent // import "golang.org/x/crypto/ssh/agent"
import (
"bytes"
"crypto/dsa"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"encoding/base64"
"encoding/binary"
"errors"
"fmt"
"io"
"math/big"
"sync"
"golang.org/x/crypto/ed25519"
"golang.org/x/crypto/ssh"
)
// Agent represents the capabilities of an ssh-agent.
type Agent interface {
// List returns the identities known to the agent.
List() ([]*Key, error)
// Sign has the agent sign the data using a protocol 2 key as defined
// in [PROTOCOL.agent] section 2.6.2.
Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error)
// Add adds a private key to the agent.
Add(key AddedKey) error
// Remove removes all identities with the given public key.
Remove(key ssh.PublicKey) error
// RemoveAll removes all identities.
RemoveAll() error
// Lock locks the agent. Sign and Remove will fail, and List will return an empty list.
Lock(passphrase []byte) error
// Unlock undoes the effect of Lock
Unlock(passphrase []byte) error
// Signers returns signers for all the known keys.
Signers() ([]ssh.Signer, error)
}
// ConstraintExtension describes an optional constraint defined by users.
type ConstraintExtension struct {
// ExtensionName consists of a UTF-8 string suffixed by the
// implementation domain following the naming scheme defined
// in Section 4.2 of [RFC4251], e.g. "foo@example.com".
ExtensionName string
// ExtensionDetails contains the actual content of the extended
// constraint.
ExtensionDetails []byte
}
// AddedKey describes an SSH key to be added to an Agent.
type AddedKey struct {
// PrivateKey must be a *rsa.PrivateKey, *dsa.PrivateKey or
// *ecdsa.PrivateKey, which will be inserted into the agent.
PrivateKey interface{}
// Certificate, if not nil, is communicated to the agent and will be
// stored with the key.
Certificate *ssh.Certificate
// Comment is an optional, free-form string.
Comment string
// LifetimeSecs, if not zero, is the number of seconds that the
// agent will store the key for.
LifetimeSecs uint32
// ConfirmBeforeUse, if true, requests that the agent confirm with the
// user before each use of this key.
ConfirmBeforeUse bool
// ConstraintExtensions are the experimental or private-use constraints
// defined by users.
ConstraintExtensions []ConstraintExtension
}
// See [PROTOCOL.agent], section 3.
const (
agentRequestV1Identities = 1
agentRemoveAllV1Identities = 9
// 3.2 Requests from client to agent for protocol 2 key operations
agentAddIdentity = 17
agentRemoveIdentity = 18
agentRemoveAllIdentities = 19
agentAddIDConstrained = 25
// 3.3 Key-type independent requests from client to agent
agentAddSmartcardKey = 20
agentRemoveSmartcardKey = 21
agentLock = 22
agentUnlock = 23
agentAddSmartcardKeyConstrained = 26
// 3.7 Key constraint identifiers
agentConstrainLifetime = 1
agentConstrainConfirm = 2
agentConstrainExtension = 3
)
// maxAgentResponseBytes is the maximum agent reply size that is accepted. This
// is a sanity check, not a limit in the spec.
const maxAgentResponseBytes = 16 << 20
// Agent messages:
// These structures mirror the wire format of the corresponding ssh agent
// messages found in [PROTOCOL.agent].
// 3.4 Generic replies from agent to client
const agentFailure = 5
type failureAgentMsg struct{}
const agentSuccess = 6
type successAgentMsg struct{}
// See [PROTOCOL.agent], section 2.5.2.
const agentRequestIdentities = 11
type requestIdentitiesAgentMsg struct{}
// See [PROTOCOL.agent], section 2.5.2.
const agentIdentitiesAnswer = 12
type identitiesAnswerAgentMsg struct {
NumKeys uint32 `sshtype:"12"`
Keys []byte `ssh:"rest"`
}
// See [PROTOCOL.agent], section 2.6.2.
const agentSignRequest = 13
type signRequestAgentMsg struct {
KeyBlob []byte `sshtype:"13"`
Data []byte
Flags uint32
}
// See [PROTOCOL.agent], section 2.6.2.
// 3.6 Replies from agent to client for protocol 2 key operations
const agentSignResponse = 14
type signResponseAgentMsg struct {
SigBlob []byte `sshtype:"14"`
}
type publicKey struct {
Format string
Rest []byte `ssh:"rest"`
}
// 3.7 Key constraint identifiers
type constrainLifetimeAgentMsg struct {
LifetimeSecs uint32 `sshtype:"1"`
}
type constrainExtensionAgentMsg struct {
ExtensionName string `sshtype:"3"`
ExtensionDetails []byte
// Rest is a field used for parsing, not part of message
Rest []byte `ssh:"rest"`
}
// Key represents a protocol 2 public key as defined in
// [PROTOCOL.agent], section 2.5.2.
type Key struct {
Format string
Blob []byte
Comment string
}
func clientErr(err error) error {
return fmt.Errorf("agent: client error: %v", err)
}
// String returns the storage form of an agent key with the format, base64
// encoded serialized key, and the comment if it is not empty.
func (k *Key) String() string {
s := string(k.Format) + " " + base64.StdEncoding.EncodeToString(k.Blob)
if k.Comment != "" {
s += " " + k.Comment
}
return s
}
// Type returns the public key type.
func (k *Key) Type() string {
return k.Format
}
// Marshal returns key blob to satisfy the ssh.PublicKey interface.
func (k *Key) Marshal() []byte {
return k.Blob
}
// Verify satisfies the ssh.PublicKey interface.
func (k *Key) Verify(data []byte, sig *ssh.Signature) error {
pubKey, err := ssh.ParsePublicKey(k.Blob)
if err != nil {
return fmt.Errorf("agent: bad public key: %v", err)
}
return pubKey.Verify(data, sig)
}
type wireKey struct {
Format string
Rest []byte `ssh:"rest"`
}
func parseKey(in []byte) (out *Key, rest []byte, err error) {
var record struct {
Blob []byte
Comment string
Rest []byte `ssh:"rest"`
}
if err := ssh.Unmarshal(in, &record); err != nil {
return nil, nil, err
}
var wk wireKey
if err := ssh.Unmarshal(record.Blob, &wk); err != nil {
return nil, nil, err
}
return &Key{
Format: wk.Format,
Blob: record.Blob,
Comment: record.Comment,
}, record.Rest, nil
}
// client is a client for an ssh-agent process.
type client struct {
// conn is typically a *net.UnixConn
conn io.ReadWriter
// mu is used to prevent concurrent access to the agent
mu sync.Mutex
}
// NewClient returns an Agent that talks to an ssh-agent process over
// the given connection.
func NewClient(rw io.ReadWriter) Agent {
return &client{conn: rw}
}
// call sends an RPC to the agent. On success, the reply is unmarshaled
// into a message struct whose concrete type is selected by the first
// byte of the reply (the message type).
func (c *client) call(req []byte) (reply interface{}, err error) {
c.mu.Lock()
defer c.mu.Unlock()
msg := make([]byte, 4+len(req))
binary.BigEndian.PutUint32(msg, uint32(len(req)))
copy(msg[4:], req)
if _, err = c.conn.Write(msg); err != nil {
return nil, clientErr(err)
}
var respSizeBuf [4]byte
if _, err = io.ReadFull(c.conn, respSizeBuf[:]); err != nil {
return nil, clientErr(err)
}
respSize := binary.BigEndian.Uint32(respSizeBuf[:])
if respSize > maxAgentResponseBytes {
return nil, clientErr(errors.New("response too large"))
}
buf := make([]byte, respSize)
if _, err = io.ReadFull(c.conn, buf); err != nil {
return nil, clientErr(err)
}
reply, err = unmarshal(buf)
if err != nil {
return nil, clientErr(err)
}
return reply, err
}
func (c *client) simpleCall(req []byte) error {
resp, err := c.call(req)
if err != nil {
return err
}
if _, ok := resp.(*successAgentMsg); ok {
return nil
}
return errors.New("agent: failure")
}
func (c *client) RemoveAll() error {
return c.simpleCall([]byte{agentRemoveAllIdentities})
}
func (c *client) Remove(key ssh.PublicKey) error {
req := ssh.Marshal(&agentRemoveIdentityMsg{
KeyBlob: key.Marshal(),
})
return c.simpleCall(req)
}
func (c *client) Lock(passphrase []byte) error {
req := ssh.Marshal(&agentLockMsg{
Passphrase: passphrase,
})
return c.simpleCall(req)
}
func (c *client) Unlock(passphrase []byte) error {
req := ssh.Marshal(&agentUnlockMsg{
Passphrase: passphrase,
})
return c.simpleCall(req)
}
// List returns the identities known to the agent.
func (c *client) List() ([]*Key, error) {
// see [PROTOCOL.agent] section 2.5.2.
req := []byte{agentRequestIdentities}
msg, err := c.call(req)
if err != nil {
return nil, err
}
switch msg := msg.(type) {
case *identitiesAnswerAgentMsg:
if msg.NumKeys > maxAgentResponseBytes/8 {
return nil, errors.New("agent: too many keys in agent reply")
}
keys := make([]*Key, msg.NumKeys)
data := msg.Keys
for i := uint32(0); i < msg.NumKeys; i++ {
var key *Key
var err error
if key, data, err = parseKey(data); err != nil {
return nil, err
}
keys[i] = key
}
return keys, nil
case *failureAgentMsg:
return nil, errors.New("agent: failed to list keys")
}
panic("unreachable")
}
// Sign has the agent sign the data using a protocol 2 key as defined
// in [PROTOCOL.agent] section 2.6.2.
func (c *client) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) {
req := ssh.Marshal(signRequestAgentMsg{
KeyBlob: key.Marshal(),
Data: data,
})
msg, err := c.call(req)
if err != nil {
return nil, err
}
switch msg := msg.(type) {
case *signResponseAgentMsg:
var sig ssh.Signature
if err := ssh.Unmarshal(msg.SigBlob, &sig); err != nil {
return nil, err
}
return &sig, nil
case *failureAgentMsg:
return nil, errors.New("agent: failed to sign challenge")
}
panic("unreachable")
}
// unmarshal parses an agent message in packet; the concrete type of the
// returned value is selected by the message type byte at the start of packet.
func unmarshal(packet []byte) (interface{}, error) {
if len(packet) < 1 {
return nil, errors.New("agent: empty packet")
}
var msg interface{}
switch packet[0] {
case agentFailure:
return new(failureAgentMsg), nil
case agentSuccess:
return new(successAgentMsg), nil
case agentIdentitiesAnswer:
msg = new(identitiesAnswerAgentMsg)
case agentSignResponse:
msg = new(signResponseAgentMsg)
case agentV1IdentitiesAnswer:
msg = new(agentV1IdentityMsg)
default:
return nil, fmt.Errorf("agent: unknown type tag %d", packet[0])
}
if err := ssh.Unmarshal(packet, msg); err != nil {
return nil, err
}
return msg, nil
}
type rsaKeyMsg struct {
Type string `sshtype:"17|25"`
N *big.Int
E *big.Int
D *big.Int
Iqmp *big.Int // IQMP = Inverse Q Mod P
P *big.Int
Q *big.Int
Comments string
Constraints []byte `ssh:"rest"`
}
type dsaKeyMsg struct {
Type string `sshtype:"17|25"`
P *big.Int
Q *big.Int
G *big.Int
Y *big.Int
X *big.Int
Comments string
Constraints []byte `ssh:"rest"`
}
type ecdsaKeyMsg struct {
Type string `sshtype:"17|25"`
Curve string
KeyBytes []byte
D *big.Int
Comments string
Constraints []byte `ssh:"rest"`
}
type ed25519KeyMsg struct {
Type string `sshtype:"17|25"`
Pub []byte
Priv []byte
Comments string
Constraints []byte `ssh:"rest"`
}
// insertKey adds a private key to the agent.
func (c *client) insertKey(s interface{}, comment string, constraints []byte) error {
var req []byte
switch k := s.(type) {
case *rsa.PrivateKey:
if len(k.Primes) != 2 {
return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes))
}
k.Precompute()
req = ssh.Marshal(rsaKeyMsg{
Type: ssh.KeyAlgoRSA,
N: k.N,
E: big.NewInt(int64(k.E)),
D: k.D,
Iqmp: k.Precomputed.Qinv,
P: k.Primes[0],
Q: k.Primes[1],
Comments: comment,
Constraints: constraints,
})
case *dsa.PrivateKey:
req = ssh.Marshal(dsaKeyMsg{
Type: ssh.KeyAlgoDSA,
P: k.P,
Q: k.Q,
G: k.G,
Y: k.Y,
X: k.X,
Comments: comment,
Constraints: constraints,
})
case *ecdsa.PrivateKey:
nistID := fmt.Sprintf("nistp%d", k.Params().BitSize)
req = ssh.Marshal(ecdsaKeyMsg{
Type: "ecdsa-sha2-" + nistID,
Curve: nistID,
KeyBytes: elliptic.Marshal(k.Curve, k.X, k.Y),
D: k.D,
Comments: comment,
Constraints: constraints,
})
case *ed25519.PrivateKey:
req = ssh.Marshal(ed25519KeyMsg{
Type: ssh.KeyAlgoED25519,
Pub: []byte(*k)[32:],
Priv: []byte(*k),
Comments: comment,
Constraints: constraints,
})
default:
return fmt.Errorf("agent: unsupported key type %T", s)
}
// if constraints are present then the message type needs to be changed.
if len(constraints) != 0 {
req[0] = agentAddIDConstrained
}
resp, err := c.call(req)
if err != nil {
return err
}
if _, ok := resp.(*successAgentMsg); ok {
return nil
}
return errors.New("agent: failure")
}
type rsaCertMsg struct {
Type string `sshtype:"17|25"`
CertBytes []byte
D *big.Int
Iqmp *big.Int // IQMP = Inverse Q Mod P
P *big.Int
Q *big.Int
Comments string
Constraints []byte `ssh:"rest"`
}
type dsaCertMsg struct {
Type string `sshtype:"17|25"`
CertBytes []byte
X *big.Int
Comments string
Constraints []byte `ssh:"rest"`
}
type ecdsaCertMsg struct {
Type string `sshtype:"17|25"`
CertBytes []byte
D *big.Int
Comments string
Constraints []byte `ssh:"rest"`
}
type ed25519CertMsg struct {
Type string `sshtype:"17|25"`
CertBytes []byte
Pub []byte
Priv []byte
Comments string
Constraints []byte `ssh:"rest"`
}
// Add adds a private key to the agent. If a certificate is given, that
// certificate is added as the public key instead.
func (c *client) Add(key AddedKey) error {
var constraints []byte
if secs := key.LifetimeSecs; secs != 0 {
constraints = append(constraints, ssh.Marshal(constrainLifetimeAgentMsg{secs})...)
}
if key.ConfirmBeforeUse {
constraints = append(constraints, agentConstrainConfirm)
}
cert := key.Certificate
if cert == nil {
return c.insertKey(key.PrivateKey, key.Comment, constraints)
}
return c.insertCert(key.PrivateKey, cert, key.Comment, constraints)
}
func (c *client) insertCert(s interface{}, cert *ssh.Certificate, comment string, constraints []byte) error {
var req []byte
switch k := s.(type) {
case *rsa.PrivateKey:
if len(k.Primes) != 2 {
return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes))
}
k.Precompute()
req = ssh.Marshal(rsaCertMsg{
Type: cert.Type(),
CertBytes: cert.Marshal(),
D: k.D,
Iqmp: k.Precomputed.Qinv,
P: k.Primes[0],
Q: k.Primes[1],
Comments: comment,
Constraints: constraints,
})
case *dsa.PrivateKey:
req = ssh.Marshal(dsaCertMsg{
Type: cert.Type(),
CertBytes: cert.Marshal(),
X: k.X,
Comments: comment,
Constraints: constraints,
})
case *ecdsa.PrivateKey:
req = ssh.Marshal(ecdsaCertMsg{
Type: cert.Type(),
CertBytes: cert.Marshal(),
D: k.D,
Comments: comment,
Constraints: constraints,
})
case *ed25519.PrivateKey:
req = ssh.Marshal(ed25519CertMsg{
Type: cert.Type(),
CertBytes: cert.Marshal(),
Pub: []byte(*k)[32:],
Priv: []byte(*k),
Comments: comment,
Constraints: constraints,
})
default:
return fmt.Errorf("agent: unsupported key type %T", s)
}
// if constraints are present then the message type needs to be changed.
if len(constraints) != 0 {
req[0] = agentAddIDConstrained
}
signer, err := ssh.NewSignerFromKey(s)
if err != nil {
return err
}
if !bytes.Equal(cert.Key.Marshal(), signer.PublicKey().Marshal()) {
return errors.New("agent: signer and cert have different public key")
}
resp, err := c.call(req)
if err != nil {
return err
}
if _, ok := resp.(*successAgentMsg); ok {
return nil
}
return errors.New("agent: failure")
}
// Signers provides a callback for client authentication.
func (c *client) Signers() ([]ssh.Signer, error) {
keys, err := c.List()
if err != nil {
return nil, err
}
var result []ssh.Signer
for _, k := range keys {
result = append(result, &agentKeyringSigner{c, k})
}
return result, nil
}
type agentKeyringSigner struct {
agent *client
pub ssh.PublicKey
}
func (s *agentKeyringSigner) PublicKey() ssh.PublicKey {
return s.pub
}
func (s *agentKeyringSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature, error) {
// The agent has its own entropy source, so the rand argument is ignored.
return s.agent.Sign(s.pub, data)
}
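For orientation, here is a minimal usage sketch for the client above (illustrative only, not part of the vendored source): it dials the unix socket named by SSH_AUTH_SOCK, wraps it with NewClient and lists the agent's identities.

package main

import (
	"fmt"
	"log"
	"net"
	"os"

	"golang.org/x/crypto/ssh/agent"
)

func main() {
	// SSH_AUTH_SOCK points at the unix socket of a running ssh-agent.
	conn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ag := agent.NewClient(conn)
	keys, err := ag.List()
	if err != nil {
		log.Fatal(err)
	}
	for _, k := range keys {
		fmt.Println(k.String()) // "<format> <base64 blob> <comment>"
	}
}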

103
vendor/golang.org/x/crypto/ssh/agent/forward.go generated vendored Normal file

@ -0,0 +1,103 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package agent
import (
"errors"
"io"
"net"
"sync"
"golang.org/x/crypto/ssh"
)
// RequestAgentForwarding sets up agent forwarding for the session.
// ForwardToAgent or ForwardToRemote should be called to route
// the authentication requests.
func RequestAgentForwarding(session *ssh.Session) error {
ok, err := session.SendRequest("auth-agent-req@openssh.com", true, nil)
if err != nil {
return err
}
if !ok {
return errors.New("forwarding request denied")
}
return nil
}
// ForwardToAgent routes authentication requests to the given keyring.
func ForwardToAgent(client *ssh.Client, keyring Agent) error {
channels := client.HandleChannelOpen(channelType)
if channels == nil {
return errors.New("agent: already have handler for " + channelType)
}
go func() {
for ch := range channels {
channel, reqs, err := ch.Accept()
if err != nil {
continue
}
go ssh.DiscardRequests(reqs)
go func() {
ServeAgent(keyring, channel)
channel.Close()
}()
}
}()
return nil
}
const channelType = "auth-agent@openssh.com"
// ForwardToRemote routes authentication requests to the ssh-agent
// process serving on the given unix socket.
func ForwardToRemote(client *ssh.Client, addr string) error {
channels := client.HandleChannelOpen(channelType)
if channels == nil {
return errors.New("agent: already have handler for " + channelType)
}
conn, err := net.Dial("unix", addr)
if err != nil {
return err
}
conn.Close()
go func() {
for ch := range channels {
channel, reqs, err := ch.Accept()
if err != nil {
continue
}
go ssh.DiscardRequests(reqs)
go forwardUnixSocket(channel, addr)
}
}()
return nil
}
func forwardUnixSocket(channel ssh.Channel, addr string) {
conn, err := net.Dial("unix", addr)
if err != nil {
return
}
var wg sync.WaitGroup
wg.Add(2)
go func() {
io.Copy(conn, channel)
conn.(*net.UnixConn).CloseWrite()
wg.Done()
}()
go func() {
io.Copy(channel, conn)
channel.CloseWrite()
wg.Done()
}()
wg.Wait()
conn.Close()
channel.Close()
}
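A hedged sketch of how the forwarding pieces above are typically combined (not part of this file; sshClient and session stand for an already-established *ssh.Client and *ssh.Session, and imports of net, os, golang.org/x/crypto/ssh and .../ssh/agent are assumed): register the channel handler with ForwardToAgent first, then request forwarding on the session.

// forwardLocalAgent is a hypothetical helper illustrating the call order.
func forwardLocalAgent(sshClient *ssh.Client, session *ssh.Session) error {
	conn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
	if err != nil {
		return err
	}
	// Route incoming auth-agent@openssh.com channels to the local agent.
	if err := agent.ForwardToAgent(sshClient, agent.NewClient(conn)); err != nil {
		return err
	}
	// Ask the remote side to open such channels for this session.
	return agent.RequestAgentForwarding(session)
}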

215
vendor/golang.org/x/crypto/ssh/agent/keyring.go generated vendored Normal file

@ -0,0 +1,215 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package agent
import (
"bytes"
"crypto/rand"
"crypto/subtle"
"errors"
"fmt"
"sync"
"time"
"golang.org/x/crypto/ssh"
)
type privKey struct {
signer ssh.Signer
comment string
expire *time.Time
}
type keyring struct {
mu sync.Mutex
keys []privKey
locked bool
passphrase []byte
}
var errLocked = errors.New("agent: locked")
// NewKeyring returns an Agent that holds keys in memory. It is safe
// for concurrent use by multiple goroutines.
func NewKeyring() Agent {
return &keyring{}
}
// RemoveAll removes all identities.
func (r *keyring) RemoveAll() error {
r.mu.Lock()
defer r.mu.Unlock()
if r.locked {
return errLocked
}
r.keys = nil
return nil
}
// removeLocked does the actual key removal. The caller must already be holding the
// keyring mutex.
func (r *keyring) removeLocked(want []byte) error {
found := false
for i := 0; i < len(r.keys); {
if bytes.Equal(r.keys[i].signer.PublicKey().Marshal(), want) {
found = true
r.keys[i] = r.keys[len(r.keys)-1]
r.keys = r.keys[:len(r.keys)-1]
continue
} else {
i++
}
}
if !found {
return errors.New("agent: key not found")
}
return nil
}
// Remove removes all identities with the given public key.
func (r *keyring) Remove(key ssh.PublicKey) error {
r.mu.Lock()
defer r.mu.Unlock()
if r.locked {
return errLocked
}
return r.removeLocked(key.Marshal())
}
// Lock locks the agent. Sign and Remove will fail, and List will return an empty list.
func (r *keyring) Lock(passphrase []byte) error {
r.mu.Lock()
defer r.mu.Unlock()
if r.locked {
return errLocked
}
r.locked = true
r.passphrase = passphrase
return nil
}
// Unlock undoes the effect of Lock
func (r *keyring) Unlock(passphrase []byte) error {
r.mu.Lock()
defer r.mu.Unlock()
if !r.locked {
return errors.New("agent: not locked")
}
if len(passphrase) != len(r.passphrase) || 1 != subtle.ConstantTimeCompare(passphrase, r.passphrase) {
return fmt.Errorf("agent: incorrect passphrase")
}
r.locked = false
r.passphrase = nil
return nil
}
// expireKeysLocked removes expired keys from the keyring. If a key was added
// with a LifetimeSecs constraint and at least that many seconds have
// elapsed, it is removed. The caller *must* be holding the keyring mutex.
func (r *keyring) expireKeysLocked() {
for _, k := range r.keys {
if k.expire != nil && time.Now().After(*k.expire) {
r.removeLocked(k.signer.PublicKey().Marshal())
}
}
}
// List returns the identities known to the agent.
func (r *keyring) List() ([]*Key, error) {
r.mu.Lock()
defer r.mu.Unlock()
if r.locked {
// section 2.7: locked agents return empty.
return nil, nil
}
r.expireKeysLocked()
var ids []*Key
for _, k := range r.keys {
pub := k.signer.PublicKey()
ids = append(ids, &Key{
Format: pub.Type(),
Blob: pub.Marshal(),
Comment: k.comment})
}
return ids, nil
}
// Add adds a private key to the keyring. If a certificate is given, that
// certificate is added as the public key. The LifetimeSecs constraint is
// honoured; any other constraints are ignored.
func (r *keyring) Add(key AddedKey) error {
r.mu.Lock()
defer r.mu.Unlock()
if r.locked {
return errLocked
}
signer, err := ssh.NewSignerFromKey(key.PrivateKey)
if err != nil {
return err
}
if cert := key.Certificate; cert != nil {
signer, err = ssh.NewCertSigner(cert, signer)
if err != nil {
return err
}
}
p := privKey{
signer: signer,
comment: key.Comment,
}
if key.LifetimeSecs > 0 {
t := time.Now().Add(time.Duration(key.LifetimeSecs) * time.Second)
p.expire = &t
}
r.keys = append(r.keys, p)
return nil
}
// Sign returns a signature for the data.
func (r *keyring) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) {
r.mu.Lock()
defer r.mu.Unlock()
if r.locked {
return nil, errLocked
}
r.expireKeysLocked()
wanted := key.Marshal()
for _, k := range r.keys {
if bytes.Equal(k.signer.PublicKey().Marshal(), wanted) {
return k.signer.Sign(rand.Reader, data)
}
}
return nil, errors.New("not found")
}
// Signers returns signers for all the known keys.
func (r *keyring) Signers() ([]ssh.Signer, error) {
r.mu.Lock()
defer r.mu.Unlock()
if r.locked {
return nil, errLocked
}
r.expireKeysLocked()
s := make([]ssh.Signer, 0, len(r.keys))
for _, k := range r.keys {
s = append(s, k.signer)
}
return s, nil
}
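A minimal in-memory keyring sketch (illustrative only, not part of the vendored code): generate a throwaway RSA key, add it with a lifetime constraint and ask for its signers.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"log"

	"golang.org/x/crypto/ssh/agent"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	keyring := agent.NewKeyring()
	// The in-memory keyring honours LifetimeSecs; other constraints are ignored.
	if err := keyring.Add(agent.AddedKey{PrivateKey: priv, Comment: "demo", LifetimeSecs: 300}); err != nil {
		log.Fatal(err)
	}
	signers, err := keyring.Signers()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("keyring holds %d signer(s)", len(signers))
}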

523
vendor/golang.org/x/crypto/ssh/agent/server.go generated vendored Normal file

@ -0,0 +1,523 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package agent
import (
"crypto/dsa"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"encoding/binary"
"errors"
"fmt"
"io"
"log"
"math/big"
"golang.org/x/crypto/ed25519"
"golang.org/x/crypto/ssh"
)
// server wraps an Agent and uses it to implement the agent side of
// the ssh-agent wire protocol.
type server struct {
agent Agent
}
func (s *server) processRequestBytes(reqData []byte) []byte {
rep, err := s.processRequest(reqData)
if err != nil {
if err != errLocked {
// TODO(hanwen): provide better logging interface?
log.Printf("agent %d: %v", reqData[0], err)
}
return []byte{agentFailure}
}
if rep == nil {
return []byte{agentSuccess}
}
return ssh.Marshal(rep)
}
func marshalKey(k *Key) []byte {
var record struct {
Blob []byte
Comment string
}
record.Blob = k.Marshal()
record.Comment = k.Comment
return ssh.Marshal(&record)
}
// See [PROTOCOL.agent], section 2.5.1.
const agentV1IdentitiesAnswer = 2
type agentV1IdentityMsg struct {
Numkeys uint32 `sshtype:"2"`
}
type agentRemoveIdentityMsg struct {
KeyBlob []byte `sshtype:"18"`
}
type agentLockMsg struct {
Passphrase []byte `sshtype:"22"`
}
type agentUnlockMsg struct {
Passphrase []byte `sshtype:"23"`
}
func (s *server) processRequest(data []byte) (interface{}, error) {
switch data[0] {
case agentRequestV1Identities:
return &agentV1IdentityMsg{0}, nil
case agentRemoveAllV1Identities:
return nil, nil
case agentRemoveIdentity:
var req agentRemoveIdentityMsg
if err := ssh.Unmarshal(data, &req); err != nil {
return nil, err
}
var wk wireKey
if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil {
return nil, err
}
return nil, s.agent.Remove(&Key{Format: wk.Format, Blob: req.KeyBlob})
case agentRemoveAllIdentities:
return nil, s.agent.RemoveAll()
case agentLock:
var req agentLockMsg
if err := ssh.Unmarshal(data, &req); err != nil {
return nil, err
}
return nil, s.agent.Lock(req.Passphrase)
case agentUnlock:
var req agentUnlockMsg
if err := ssh.Unmarshal(data, &req); err != nil {
return nil, err
}
return nil, s.agent.Unlock(req.Passphrase)
case agentSignRequest:
var req signRequestAgentMsg
if err := ssh.Unmarshal(data, &req); err != nil {
return nil, err
}
var wk wireKey
if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil {
return nil, err
}
k := &Key{
Format: wk.Format,
Blob: req.KeyBlob,
}
sig, err := s.agent.Sign(k, req.Data) // TODO(hanwen): flags.
if err != nil {
return nil, err
}
return &signResponseAgentMsg{SigBlob: ssh.Marshal(sig)}, nil
case agentRequestIdentities:
keys, err := s.agent.List()
if err != nil {
return nil, err
}
rep := identitiesAnswerAgentMsg{
NumKeys: uint32(len(keys)),
}
for _, k := range keys {
rep.Keys = append(rep.Keys, marshalKey(k)...)
}
return rep, nil
case agentAddIDConstrained, agentAddIdentity:
return nil, s.insertIdentity(data)
}
return nil, fmt.Errorf("unknown opcode %d", data[0])
}
func parseConstraints(constraints []byte) (lifetimeSecs uint32, confirmBeforeUse bool, extensions []ConstraintExtension, err error) {
for len(constraints) != 0 {
switch constraints[0] {
case agentConstrainLifetime:
lifetimeSecs = binary.BigEndian.Uint32(constraints[1:5])
constraints = constraints[5:]
case agentConstrainConfirm:
confirmBeforeUse = true
constraints = constraints[1:]
case agentConstrainExtension:
var msg constrainExtensionAgentMsg
if err = ssh.Unmarshal(constraints, &msg); err != nil {
return 0, false, nil, err
}
extensions = append(extensions, ConstraintExtension{
ExtensionName: msg.ExtensionName,
ExtensionDetails: msg.ExtensionDetails,
})
constraints = msg.Rest
default:
return 0, false, nil, fmt.Errorf("unknown constraint type: %d", constraints[0])
}
}
return
}
func setConstraints(key *AddedKey, constraintBytes []byte) error {
lifetimeSecs, confirmBeforeUse, constraintExtensions, err := parseConstraints(constraintBytes)
if err != nil {
return err
}
key.LifetimeSecs = lifetimeSecs
key.ConfirmBeforeUse = confirmBeforeUse
key.ConstraintExtensions = constraintExtensions
return nil
}
func parseRSAKey(req []byte) (*AddedKey, error) {
var k rsaKeyMsg
if err := ssh.Unmarshal(req, &k); err != nil {
return nil, err
}
if k.E.BitLen() > 30 {
return nil, errors.New("agent: RSA public exponent too large")
}
priv := &rsa.PrivateKey{
PublicKey: rsa.PublicKey{
E: int(k.E.Int64()),
N: k.N,
},
D: k.D,
Primes: []*big.Int{k.P, k.Q},
}
priv.Precompute()
addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments}
if err := setConstraints(addedKey, k.Constraints); err != nil {
return nil, err
}
return addedKey, nil
}
func parseEd25519Key(req []byte) (*AddedKey, error) {
var k ed25519KeyMsg
if err := ssh.Unmarshal(req, &k); err != nil {
return nil, err
}
priv := ed25519.PrivateKey(k.Priv)
addedKey := &AddedKey{PrivateKey: &priv, Comment: k.Comments}
if err := setConstraints(addedKey, k.Constraints); err != nil {
return nil, err
}
return addedKey, nil
}
func parseDSAKey(req []byte) (*AddedKey, error) {
var k dsaKeyMsg
if err := ssh.Unmarshal(req, &k); err != nil {
return nil, err
}
priv := &dsa.PrivateKey{
PublicKey: dsa.PublicKey{
Parameters: dsa.Parameters{
P: k.P,
Q: k.Q,
G: k.G,
},
Y: k.Y,
},
X: k.X,
}
addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments}
if err := setConstraints(addedKey, k.Constraints); err != nil {
return nil, err
}
return addedKey, nil
}
func unmarshalECDSA(curveName string, keyBytes []byte, privScalar *big.Int) (priv *ecdsa.PrivateKey, err error) {
priv = &ecdsa.PrivateKey{
D: privScalar,
}
switch curveName {
case "nistp256":
priv.Curve = elliptic.P256()
case "nistp384":
priv.Curve = elliptic.P384()
case "nistp521":
priv.Curve = elliptic.P521()
default:
return nil, fmt.Errorf("agent: unknown curve %q", curveName)
}
priv.X, priv.Y = elliptic.Unmarshal(priv.Curve, keyBytes)
if priv.X == nil || priv.Y == nil {
return nil, errors.New("agent: point not on curve")
}
return priv, nil
}
func parseEd25519Cert(req []byte) (*AddedKey, error) {
var k ed25519CertMsg
if err := ssh.Unmarshal(req, &k); err != nil {
return nil, err
}
pubKey, err := ssh.ParsePublicKey(k.CertBytes)
if err != nil {
return nil, err
}
priv := ed25519.PrivateKey(k.Priv)
cert, ok := pubKey.(*ssh.Certificate)
if !ok {
return nil, errors.New("agent: bad ED25519 certificate")
}
addedKey := &AddedKey{PrivateKey: &priv, Certificate: cert, Comment: k.Comments}
if err := setConstraints(addedKey, k.Constraints); err != nil {
return nil, err
}
return addedKey, nil
}
func parseECDSAKey(req []byte) (*AddedKey, error) {
var k ecdsaKeyMsg
if err := ssh.Unmarshal(req, &k); err != nil {
return nil, err
}
priv, err := unmarshalECDSA(k.Curve, k.KeyBytes, k.D)
if err != nil {
return nil, err
}
addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments}
if err := setConstraints(addedKey, k.Constraints); err != nil {
return nil, err
}
return addedKey, nil
}
func parseRSACert(req []byte) (*AddedKey, error) {
var k rsaCertMsg
if err := ssh.Unmarshal(req, &k); err != nil {
return nil, err
}
pubKey, err := ssh.ParsePublicKey(k.CertBytes)
if err != nil {
return nil, err
}
cert, ok := pubKey.(*ssh.Certificate)
if !ok {
return nil, errors.New("agent: bad RSA certificate")
}
// An RSA publickey as marshaled by rsaPublicKey.Marshal() in keys.go
var rsaPub struct {
Name string
E *big.Int
N *big.Int
}
if err := ssh.Unmarshal(cert.Key.Marshal(), &rsaPub); err != nil {
return nil, fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err)
}
if rsaPub.E.BitLen() > 30 {
return nil, errors.New("agent: RSA public exponent too large")
}
priv := rsa.PrivateKey{
PublicKey: rsa.PublicKey{
E: int(rsaPub.E.Int64()),
N: rsaPub.N,
},
D: k.D,
Primes: []*big.Int{k.Q, k.P},
}
priv.Precompute()
addedKey := &AddedKey{PrivateKey: &priv, Certificate: cert, Comment: k.Comments}
if err := setConstraints(addedKey, k.Constraints); err != nil {
return nil, err
}
return addedKey, nil
}
func parseDSACert(req []byte) (*AddedKey, error) {
var k dsaCertMsg
if err := ssh.Unmarshal(req, &k); err != nil {
return nil, err
}
pubKey, err := ssh.ParsePublicKey(k.CertBytes)
if err != nil {
return nil, err
}
cert, ok := pubKey.(*ssh.Certificate)
if !ok {
return nil, errors.New("agent: bad DSA certificate")
}
// A DSA publickey as marshaled by dsaPublicKey.Marshal() in keys.go
var w struct {
Name string
P, Q, G, Y *big.Int
}
if err := ssh.Unmarshal(cert.Key.Marshal(), &w); err != nil {
return nil, fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err)
}
priv := &dsa.PrivateKey{
PublicKey: dsa.PublicKey{
Parameters: dsa.Parameters{
P: w.P,
Q: w.Q,
G: w.G,
},
Y: w.Y,
},
X: k.X,
}
addedKey := &AddedKey{PrivateKey: priv, Certificate: cert, Comment: k.Comments}
if err := setConstraints(addedKey, k.Constraints); err != nil {
return nil, err
}
return addedKey, nil
}
func parseECDSACert(req []byte) (*AddedKey, error) {
var k ecdsaCertMsg
if err := ssh.Unmarshal(req, &k); err != nil {
return nil, err
}
pubKey, err := ssh.ParsePublicKey(k.CertBytes)
if err != nil {
return nil, err
}
cert, ok := pubKey.(*ssh.Certificate)
if !ok {
return nil, errors.New("agent: bad ECDSA certificate")
}
// An ECDSA publickey as marshaled by ecdsaPublicKey.Marshal() in keys.go
var ecdsaPub struct {
Name string
ID string
Key []byte
}
if err := ssh.Unmarshal(cert.Key.Marshal(), &ecdsaPub); err != nil {
return nil, err
}
priv, err := unmarshalECDSA(ecdsaPub.ID, ecdsaPub.Key, k.D)
if err != nil {
return nil, err
}
addedKey := &AddedKey{PrivateKey: priv, Certificate: cert, Comment: k.Comments}
if err := setConstraints(addedKey, k.Constraints); err != nil {
return nil, err
}
return addedKey, nil
}
func (s *server) insertIdentity(req []byte) error {
var record struct {
Type string `sshtype:"17|25"`
Rest []byte `ssh:"rest"`
}
if err := ssh.Unmarshal(req, &record); err != nil {
return err
}
var addedKey *AddedKey
var err error
switch record.Type {
case ssh.KeyAlgoRSA:
addedKey, err = parseRSAKey(req)
case ssh.KeyAlgoDSA:
addedKey, err = parseDSAKey(req)
case ssh.KeyAlgoECDSA256, ssh.KeyAlgoECDSA384, ssh.KeyAlgoECDSA521:
addedKey, err = parseECDSAKey(req)
case ssh.KeyAlgoED25519:
addedKey, err = parseEd25519Key(req)
case ssh.CertAlgoRSAv01:
addedKey, err = parseRSACert(req)
case ssh.CertAlgoDSAv01:
addedKey, err = parseDSACert(req)
case ssh.CertAlgoECDSA256v01, ssh.CertAlgoECDSA384v01, ssh.CertAlgoECDSA521v01:
addedKey, err = parseECDSACert(req)
case ssh.CertAlgoED25519v01:
addedKey, err = parseEd25519Cert(req)
default:
return fmt.Errorf("agent: not implemented: %q", record.Type)
}
if err != nil {
return err
}
return s.agent.Add(*addedKey)
}
// ServeAgent serves the agent protocol on the given connection. It
// returns when an I/O error occurs.
func ServeAgent(agent Agent, c io.ReadWriter) error {
s := &server{agent}
var length [4]byte
for {
if _, err := io.ReadFull(c, length[:]); err != nil {
return err
}
l := binary.BigEndian.Uint32(length[:])
if l > maxAgentResponseBytes {
// We also cap requests.
return fmt.Errorf("agent: request too large: %d", l)
}
req := make([]byte, l)
if _, err := io.ReadFull(c, req); err != nil {
return err
}
repData := s.processRequestBytes(req)
if len(repData) > maxAgentResponseBytes {
return fmt.Errorf("agent: reply too large: %d bytes", len(repData))
}
binary.BigEndian.PutUint32(length[:], uint32(len(repData)))
if _, err := c.Write(length[:]); err != nil {
return err
}
if _, err := c.Write(repData); err != nil {
return err
}
}
}
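An illustrative way to expose an Agent over a unix socket with ServeAgent (a sketch, not part of this file; the socket path is a placeholder and imports of net and golang.org/x/crypto/ssh/agent are assumed):

// serveOnSocket answers agent requests on the given unix socket path,
// one goroutine per connection, until Accept fails.
func serveOnSocket(path string, backing agent.Agent) error {
	l, err := net.Listen("unix", path)
	if err != nil {
		return err
	}
	defer l.Close()
	for {
		c, err := l.Accept()
		if err != nil {
			return err
		}
		go func(c net.Conn) {
			defer c.Close()
			// ServeAgent returns when the connection reports an I/O error (e.g. EOF).
			_ = agent.ServeAgent(backing, c)
		}(c)
	}
}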

540
vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go generated vendored Normal file

@ -0,0 +1,540 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package knownhosts implements a parser for the OpenSSH known_hosts
// host key database, and provides utility functions for writing
// OpenSSH compliant known_hosts files.
package knownhosts
import (
"bufio"
"bytes"
"crypto/hmac"
"crypto/rand"
"crypto/sha1"
"encoding/base64"
"errors"
"fmt"
"io"
"net"
"os"
"strings"
"golang.org/x/crypto/ssh"
)
// See the sshd manpage
// (http://man.openbsd.org/sshd#SSH_KNOWN_HOSTS_FILE_FORMAT) for
// background.
type addr struct{ host, port string }
func (a *addr) String() string {
h := a.host
if strings.Contains(h, ":") {
h = "[" + h + "]"
}
return h + ":" + a.port
}
type matcher interface {
match(addr) bool
}
type hostPattern struct {
negate bool
addr addr
}
func (p *hostPattern) String() string {
n := ""
if p.negate {
n = "!"
}
return n + p.addr.String()
}
type hostPatterns []hostPattern
func (ps hostPatterns) match(a addr) bool {
matched := false
for _, p := range ps {
if !p.match(a) {
continue
}
if p.negate {
return false
}
matched = true
}
return matched
}
// See
// https://android.googlesource.com/platform/external/openssh/+/ab28f5495c85297e7a597c1ba62e996416da7c7e/addrmatch.c
// The matching of * has no regard for separators, unlike filesystem globs
func wildcardMatch(pat []byte, str []byte) bool {
for {
if len(pat) == 0 {
return len(str) == 0
}
if len(str) == 0 {
return false
}
if pat[0] == '*' {
if len(pat) == 1 {
return true
}
for j := range str {
if wildcardMatch(pat[1:], str[j:]) {
return true
}
}
return false
}
if pat[0] == '?' || pat[0] == str[0] {
pat = pat[1:]
str = str[1:]
} else {
return false
}
}
}
func (p *hostPattern) match(a addr) bool {
return wildcardMatch([]byte(p.addr.host), []byte(a.host)) && p.addr.port == a.port
}
type keyDBLine struct {
cert bool
matcher matcher
knownKey KnownKey
}
func serialize(k ssh.PublicKey) string {
return k.Type() + " " + base64.StdEncoding.EncodeToString(k.Marshal())
}
func (l *keyDBLine) match(a addr) bool {
return l.matcher.match(a)
}
type hostKeyDB struct {
// Serialized version of revoked keys
revoked map[string]*KnownKey
lines []keyDBLine
}
func newHostKeyDB() *hostKeyDB {
db := &hostKeyDB{
revoked: make(map[string]*KnownKey),
}
return db
}
func keyEq(a, b ssh.PublicKey) bool {
return bytes.Equal(a.Marshal(), b.Marshal())
}
// IsHostAuthority can be used as a callback in ssh.CertChecker
func (db *hostKeyDB) IsHostAuthority(remote ssh.PublicKey, address string) bool {
h, p, err := net.SplitHostPort(address)
if err != nil {
return false
}
a := addr{host: h, port: p}
for _, l := range db.lines {
if l.cert && keyEq(l.knownKey.Key, remote) && l.match(a) {
return true
}
}
return false
}
// IsRevoked can be used as a callback in ssh.CertChecker
func (db *hostKeyDB) IsRevoked(key *ssh.Certificate) bool {
_, ok := db.revoked[string(key.Marshal())]
return ok
}
const markerCert = "@cert-authority"
const markerRevoked = "@revoked"
func nextWord(line []byte) (string, []byte) {
i := bytes.IndexAny(line, "\t ")
if i == -1 {
return string(line), nil
}
return string(line[:i]), bytes.TrimSpace(line[i:])
}
func parseLine(line []byte) (marker, host string, key ssh.PublicKey, err error) {
if w, next := nextWord(line); w == markerCert || w == markerRevoked {
marker = w
line = next
}
host, line = nextWord(line)
if len(line) == 0 {
return "", "", nil, errors.New("knownhosts: missing host pattern")
}
// ignore the keytype as it's in the key blob anyway.
_, line = nextWord(line)
if len(line) == 0 {
return "", "", nil, errors.New("knownhosts: missing key type pattern")
}
keyBlob, _ := nextWord(line)
keyBytes, err := base64.StdEncoding.DecodeString(keyBlob)
if err != nil {
return "", "", nil, err
}
key, err = ssh.ParsePublicKey(keyBytes)
if err != nil {
return "", "", nil, err
}
return marker, host, key, nil
}
func (db *hostKeyDB) parseLine(line []byte, filename string, linenum int) error {
marker, pattern, key, err := parseLine(line)
if err != nil {
return err
}
if marker == markerRevoked {
db.revoked[string(key.Marshal())] = &KnownKey{
Key: key,
Filename: filename,
Line: linenum,
}
return nil
}
entry := keyDBLine{
cert: marker == markerCert,
knownKey: KnownKey{
Filename: filename,
Line: linenum,
Key: key,
},
}
if pattern[0] == '|' {
entry.matcher, err = newHashedHost(pattern)
} else {
entry.matcher, err = newHostnameMatcher(pattern)
}
if err != nil {
return err
}
db.lines = append(db.lines, entry)
return nil
}
func newHostnameMatcher(pattern string) (matcher, error) {
var hps hostPatterns
for _, p := range strings.Split(pattern, ",") {
if len(p) == 0 {
continue
}
var a addr
var negate bool
if p[0] == '!' {
negate = true
p = p[1:]
}
if len(p) == 0 {
return nil, errors.New("knownhosts: negation without following hostname")
}
var err error
if p[0] == '[' {
a.host, a.port, err = net.SplitHostPort(p)
if err != nil {
return nil, err
}
} else {
a.host, a.port, err = net.SplitHostPort(p)
if err != nil {
a.host = p
a.port = "22"
}
}
hps = append(hps, hostPattern{
negate: negate,
addr: a,
})
}
return hps, nil
}
// KnownKey represents a key declared in a known_hosts file.
type KnownKey struct {
Key ssh.PublicKey
Filename string
Line int
}
func (k *KnownKey) String() string {
return fmt.Sprintf("%s:%d: %s", k.Filename, k.Line, serialize(k.Key))
}
// KeyError is returned if we did not find the key in the host key
// database, or there was a mismatch. Typically, in batch
// applications, this should be interpreted as failure. Interactive
// applications can offer an interactive prompt to the user.
type KeyError struct {
// Want holds the accepted host keys. For each key algorithm,
// there can be one hostkey. If Want is empty, the host is
// unknown. If Want is non-empty, there was a mismatch, which
// can signify a MITM attack.
Want []KnownKey
}
func (u *KeyError) Error() string {
if len(u.Want) == 0 {
return "knownhosts: key is unknown"
}
return "knownhosts: key mismatch"
}
// RevokedError is returned if we found a key that was revoked.
type RevokedError struct {
Revoked KnownKey
}
func (r *RevokedError) Error() string {
return "knownhosts: key is revoked"
}
// check checks a key against the host database. This should not be
// used for verifying certificates.
func (db *hostKeyDB) check(address string, remote net.Addr, remoteKey ssh.PublicKey) error {
if revoked := db.revoked[string(remoteKey.Marshal())]; revoked != nil {
return &RevokedError{Revoked: *revoked}
}
host, port, err := net.SplitHostPort(remote.String())
if err != nil {
return fmt.Errorf("knownhosts: SplitHostPort(%s): %v", remote, err)
}
hostToCheck := addr{host, port}
if address != "" {
// Give preference to the hostname if available.
host, port, err := net.SplitHostPort(address)
if err != nil {
return fmt.Errorf("knownhosts: SplitHostPort(%s): %v", address, err)
}
hostToCheck = addr{host, port}
}
return db.checkAddr(hostToCheck, remoteKey)
}
// checkAddr checks if we can find the given public key for the
// given address.
func (db *hostKeyDB) checkAddr(a addr, remoteKey ssh.PublicKey) error {
// TODO(hanwen): are these the right semantics? What if there
// is just a key for the IP address, but not for the
// hostname?
// Algorithm => key.
knownKeys := map[string]KnownKey{}
for _, l := range db.lines {
if l.match(a) {
typ := l.knownKey.Key.Type()
if _, ok := knownKeys[typ]; !ok {
knownKeys[typ] = l.knownKey
}
}
}
keyErr := &KeyError{}
for _, v := range knownKeys {
keyErr.Want = append(keyErr.Want, v)
}
// Unknown remote host.
if len(knownKeys) == 0 {
return keyErr
}
// If the remote host starts using a different, unknown key type, we
// also interpret that as a mismatch.
if known, ok := knownKeys[remoteKey.Type()]; !ok || !keyEq(known.Key, remoteKey) {
return keyErr
}
return nil
}
// Read parses the known_hosts file contents from r.
func (db *hostKeyDB) Read(r io.Reader, filename string) error {
scanner := bufio.NewScanner(r)
lineNum := 0
for scanner.Scan() {
lineNum++
line := scanner.Bytes()
line = bytes.TrimSpace(line)
if len(line) == 0 || line[0] == '#' {
continue
}
if err := db.parseLine(line, filename, lineNum); err != nil {
return fmt.Errorf("knownhosts: %s:%d: %v", filename, lineNum, err)
}
}
return scanner.Err()
}
// New creates a host key callback from the given OpenSSH host key
// files. The returned callback is for use in
// ssh.ClientConfig.HostKeyCallback. By preference, the key check
// operates on the hostname if available, i.e. if a server changes its
// IP address, the host key check will still succeed, even though a
// record of the new IP address is not available.
func New(files ...string) (ssh.HostKeyCallback, error) {
db := newHostKeyDB()
for _, fn := range files {
f, err := os.Open(fn)
if err != nil {
return nil, err
}
defer f.Close()
if err := db.Read(f, fn); err != nil {
return nil, err
}
}
var certChecker ssh.CertChecker
certChecker.IsHostAuthority = db.IsHostAuthority
certChecker.IsRevoked = db.IsRevoked
certChecker.HostKeyFallback = db.check
return certChecker.CheckHostKey, nil
}
// Normalize normalizes an address into the form used in known_hosts
func Normalize(address string) string {
host, port, err := net.SplitHostPort(address)
if err != nil {
host = address
port = "22"
}
entry := host
if port != "22" {
entry = "[" + entry + "]:" + port
} else if strings.Contains(host, ":") && !strings.HasPrefix(host, "[") {
entry = "[" + entry + "]"
}
return entry
}
// Line returns a line to append to the known_hosts files.
func Line(addresses []string, key ssh.PublicKey) string {
var trimmed []string
for _, a := range addresses {
trimmed = append(trimmed, Normalize(a))
}
return strings.Join(trimmed, ",") + " " + serialize(key)
}
// HashHostname hashes the given hostname. The hostname is not
// normalized before hashing.
func HashHostname(hostname string) string {
// TODO(hanwen): check if we can safely normalize this always.
salt := make([]byte, sha1.Size)
_, err := rand.Read(salt)
if err != nil {
panic(fmt.Sprintf("crypto/rand failure %v", err))
}
hash := hashHost(hostname, salt)
return encodeHash(sha1HashType, salt, hash)
}
func decodeHash(encoded string) (hashType string, salt, hash []byte, err error) {
if len(encoded) == 0 || encoded[0] != '|' {
err = errors.New("knownhosts: hashed host must start with '|'")
return
}
components := strings.Split(encoded, "|")
if len(components) != 4 {
err = fmt.Errorf("knownhosts: got %d components, want 3", len(components))
return
}
hashType = components[1]
if salt, err = base64.StdEncoding.DecodeString(components[2]); err != nil {
return
}
if hash, err = base64.StdEncoding.DecodeString(components[3]); err != nil {
return
}
return
}
func encodeHash(typ string, salt []byte, hash []byte) string {
return strings.Join([]string{"",
typ,
base64.StdEncoding.EncodeToString(salt),
base64.StdEncoding.EncodeToString(hash),
}, "|")
}
// See https://android.googlesource.com/platform/external/openssh/+/ab28f5495c85297e7a597c1ba62e996416da7c7e/hostfile.c#120
func hashHost(hostname string, salt []byte) []byte {
mac := hmac.New(sha1.New, salt)
mac.Write([]byte(hostname))
return mac.Sum(nil)
}
type hashedHost struct {
salt []byte
hash []byte
}
const sha1HashType = "1"
func newHashedHost(encoded string) (*hashedHost, error) {
typ, salt, hash, err := decodeHash(encoded)
if err != nil {
return nil, err
}
// The type field seems intended for future algorithm agility, but it is
// actually hardcoded in openssh currently; see
// https://android.googlesource.com/platform/external/openssh/+/ab28f5495c85297e7a597c1ba62e996416da7c7e/hostfile.c#120
if typ != sha1HashType {
return nil, fmt.Errorf("knownhosts: got hash type %s, must be '1'", typ)
}
return &hashedHost{salt: salt, hash: hash}, nil
}
func (h *hashedHost) match(a addr) bool {
return bytes.Equal(hashHost(Normalize(a.String()), h.salt), h.hash)
}
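For context, a hedged sketch of how this package is meant to be wired into a client (not part of the vendored file; the helper name, address, user and path parameters are placeholders, and imports of golang.org/x/crypto/ssh and .../ssh/knownhosts are assumed):

// dialWithKnownHosts is a hypothetical helper: it authenticates with the
// given signer and verifies the server against the given known_hosts file.
func dialWithKnownHosts(addr, user, knownHostsPath string, signer ssh.Signer) (*ssh.Client, error) {
	callback, err := knownhosts.New(knownHostsPath)
	if err != nil {
		return nil, err
	}
	config := &ssh.ClientConfig{
		User:            user,
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: callback, // fails for unknown or mismatching host keys
	}
	return ssh.Dial("tcp", addr, config)
}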

201
vendor/gopkg.in/src-d/go-billy.v4/LICENSE generated vendored Normal file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017 Sourced Technologies S.L.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

202
vendor/gopkg.in/src-d/go-billy.v4/fs.go generated vendored Normal file

@ -0,0 +1,202 @@
package billy
import (
"errors"
"io"
"os"
"time"
)
var (
ErrReadOnly = errors.New("read-only filesystem")
ErrNotSupported = errors.New("feature not supported")
ErrCrossedBoundary = errors.New("chroot boundary crossed")
)
// Capability holds the supported features of a billy filesystem. This does
// not mean that the capability has to be supported by the underlying storage.
// For example, a billy filesystem may support WriteCapability while the
// underlying storage is mounted in read-only mode.
type Capability uint64
const (
// WriteCapability means that the fs is writable.
WriteCapability Capability = 1 << iota
// ReadCapability means that the fs is readable.
ReadCapability
// ReadAndWriteCapability is the ability to open a file in read and write mode.
ReadAndWriteCapability
// SeekCapability means it is able to move position inside the file.
SeekCapability
// TruncateCapability means that a file can be truncated.
TruncateCapability
// LockCapability is the ability to lock a file.
LockCapability
// DefaultCapabilities lists all capable features supported by filesystems
// without Capability interface. This list should not be changed until a
// major version is released.
DefaultCapabilities Capability = WriteCapability | ReadCapability |
ReadAndWriteCapability | SeekCapability | TruncateCapability |
LockCapability
// AllCapabilities lists all capable features.
AllCapabilities Capability = WriteCapability | ReadCapability |
ReadAndWriteCapability | SeekCapability | TruncateCapability |
LockCapability
)
// Filesystem abstracts the operations in a storage-agnostic interface.
// Each method implementation mimics the behavior of the equivalent functions
// in the os package of the standard library.
type Filesystem interface {
Basic
TempFile
Dir
Symlink
Chroot
}
// Basic abstracts the basic file operations in a storage-agnostic interface.
type Basic interface {
// Create creates the named file with mode 0666 (before umask), truncating
// it if it already exists. If successful, methods on the returned File can
// be used for I/O; the associated file descriptor has mode O_RDWR.
Create(filename string) (File, error)
// Open opens the named file for reading. If successful, methods on the
// returned file can be used for reading; the associated file descriptor has
// mode O_RDONLY.
Open(filename string) (File, error)
// OpenFile is the generalized open call; most users will use Open or Create
// instead. It opens the named file with specified flag (O_RDONLY etc.) and
// perm, (0666 etc.) if applicable. If successful, methods on the returned
// File can be used for I/O.
OpenFile(filename string, flag int, perm os.FileMode) (File, error)
// Stat returns a FileInfo describing the named file.
Stat(filename string) (os.FileInfo, error)
// Rename renames (moves) oldpath to newpath. If newpath already exists and
// is not a directory, Rename replaces it. OS-specific restrictions may
// apply when oldpath and newpath are in different directories.
Rename(oldpath, newpath string) error
// Remove removes the named file or directory.
Remove(filename string) error
// Join joins any number of path elements into a single path, adding a
// Separator if necessary. Join calls filepath.Clean on the result; in
// particular, all empty strings are ignored. On Windows, the result is a
// UNC path if and only if the first path element is a UNC path.
Join(elem ...string) string
}
type TempFile interface {
// TempFile creates a new temporary file in the directory dir with a name
// beginning with prefix, opens the file for reading and writing, and
// returns the resulting *os.File. If dir is the empty string, TempFile
// uses the default directory for temporary files (see os.TempDir).
// Multiple programs calling TempFile simultaneously will not choose the
// same file. The caller can use f.Name() to find the pathname of the file.
// It is the caller's responsibility to remove the file when no longer
// needed.
TempFile(dir, prefix string) (File, error)
}
// Dir abstracts the directory-related operations in a storage-agnostic
// interface as an extension to the Basic interface.
type Dir interface {
// ReadDir reads the directory named by dirname and returns a list of
// directory entries sorted by filename.
ReadDir(path string) ([]os.FileInfo, error)
// MkdirAll creates a directory named path, along with any necessary
// parents, and returns nil, or else returns an error. The permission bits
// perm are used for all directories that MkdirAll creates. If path is
// already a directory, MkdirAll does nothing and returns nil.
MkdirAll(filename string, perm os.FileMode) error
}
// Symlink abstracts the symlink-related operations in a storage-agnostic
// interface as an extension to the Basic interface.
type Symlink interface {
// Lstat returns a FileInfo describing the named file. If the file is a
// symbolic link, the returned FileInfo describes the symbolic link. Lstat
// makes no attempt to follow the link.
Lstat(filename string) (os.FileInfo, error)
// Symlink creates a symbolic link from link to target. target may be an
// absolute or relative path, and need not refer to an existing node.
// Parent directories of link are created as necessary.
Symlink(target, link string) error
// Readlink returns the target path of link.
Readlink(link string) (string, error)
}
// Change abstracts the FileInfo-changing operations in a storage-agnostic
// interface as an extension to the Basic interface.
type Change interface {
// Chmod changes the mode of the named file to mode. If the file is a
// symbolic link, it changes the mode of the link's target.
Chmod(name string, mode os.FileMode) error
// Lchown changes the numeric uid and gid of the named file. If the file is
// a symbolic link, it changes the uid and gid of the link itself.
Lchown(name string, uid, gid int) error
// Chown changes the numeric uid and gid of the named file. If the file is a
// symbolic link, it changes the uid and gid of the link's target.
Chown(name string, uid, gid int) error
// Chtimes changes the access and modification times of the named file,
// similar to the Unix utime() or utimes() functions.
//
// The underlying filesystem may truncate or round the values to a less
// precise time unit.
Chtimes(name string, atime time.Time, mtime time.Time) error
}
// Chroot abstracts the chroot-related operations in a storage-agnostic
// interface as an extension to the Basic interface.
type Chroot interface {
// Chroot returns a new filesystem of the same type whose root is the given
// path. Files outside of the designated directory tree cannot be accessed.
Chroot(path string) (Filesystem, error)
// Root returns the root path of the filesystem.
Root() string
}
// File represents a file; it is a subset of os.File.
type File interface {
// Name returns the name of the file as presented to Open.
Name() string
io.Writer
io.Reader
io.ReaderAt
io.Seeker
io.Closer
// Lock locks the file in the manner of flock. It protects against access
// from other processes.
Lock() error
// Unlock unlocks the file.
Unlock() error
// Truncate the file.
Truncate(size int64) error
}
// Capable is the interface implemented by filesystems that can report their
// available features.
type Capable interface {
// Capabilities returns the capabilities of a filesystem in bit flags.
Capabilities() Capability
}
// Capabilities returns the features supported by a filesystem. If the
// filesystem does not implement the Capable interface, it returns
// DefaultCapabilities.
func Capabilities(fs Basic) Capability {
capable, ok := fs.(Capable)
if !ok {
return DefaultCapabilities
}
return capable.Capabilities()
}
// CapabilityCheck tests the filesystem for the provided capabilities and
// returns true in case it supports all of them.
func CapabilityCheck(fs Basic, capabilities Capability) bool {
fsCaps := Capabilities(fs)
return fsCaps&capabilities == capabilities
}
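To make the capability API above concrete, here is a minimal sketch of how a caller might query it. It assumes the osfs implementation vendored in this same commit; the /tmp/billy-demo path and the package main wrapper are purely illustrative, not part of the library.

package main

import (
	"fmt"

	"gopkg.in/src-d/go-billy.v4"
	"gopkg.in/src-d/go-billy.v4/osfs"
)

func main() {
	// osfs.New returns a billy.Filesystem rooted at the given directory;
	// the path is only an example.
	fs := osfs.New("/tmp/billy-demo")

	// Capabilities falls back to DefaultCapabilities when the filesystem
	// does not implement the Capable interface.
	fmt.Printf("capabilities: %b\n", billy.Capabilities(fs))

	// CapabilityCheck reports whether every requested flag is supported.
	if billy.CapabilityCheck(fs, billy.WriteCapability|billy.LockCapability) {
		fmt.Println("writing and file locking are both supported")
	}
}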

242
vendor/gopkg.in/src-d/go-billy.v4/helper/chroot/chroot.go generated vendored Normal file
View File

@ -0,0 +1,242 @@
package chroot
import (
"os"
"path/filepath"
"strings"
"gopkg.in/src-d/go-billy.v4"
"gopkg.in/src-d/go-billy.v4/helper/polyfill"
)
// ChrootHelper is a helper to implement billy.Chroot.
type ChrootHelper struct {
underlying billy.Filesystem
base string
}
// New creates a new filesystem wrapping up the given 'fs'.
// The created filesystem has its root at the given base directory of the
// underlying filesystem.
func New(fs billy.Basic, base string) billy.Filesystem {
return &ChrootHelper{
underlying: polyfill.New(fs),
base: base,
}
}
func (fs *ChrootHelper) underlyingPath(filename string) (string, error) {
if isCrossBoundaries(filename) {
return "", billy.ErrCrossedBoundary
}
return fs.Join(fs.Root(), filename), nil
}
func isCrossBoundaries(path string) bool {
path = filepath.ToSlash(path)
path = filepath.Clean(path)
return strings.HasPrefix(path, ".."+string(filepath.Separator))
}
func (fs *ChrootHelper) Create(filename string) (billy.File, error) {
fullpath, err := fs.underlyingPath(filename)
if err != nil {
return nil, err
}
f, err := fs.underlying.Create(fullpath)
if err != nil {
return nil, err
}
return newFile(fs, f, filename), nil
}
func (fs *ChrootHelper) Open(filename string) (billy.File, error) {
fullpath, err := fs.underlyingPath(filename)
if err != nil {
return nil, err
}
f, err := fs.underlying.Open(fullpath)
if err != nil {
return nil, err
}
return newFile(fs, f, filename), nil
}
func (fs *ChrootHelper) OpenFile(filename string, flag int, mode os.FileMode) (billy.File, error) {
fullpath, err := fs.underlyingPath(filename)
if err != nil {
return nil, err
}
f, err := fs.underlying.OpenFile(fullpath, flag, mode)
if err != nil {
return nil, err
}
return newFile(fs, f, filename), nil
}
func (fs *ChrootHelper) Stat(filename string) (os.FileInfo, error) {
fullpath, err := fs.underlyingPath(filename)
if err != nil {
return nil, err
}
return fs.underlying.Stat(fullpath)
}
func (fs *ChrootHelper) Rename(from, to string) error {
var err error
from, err = fs.underlyingPath(from)
if err != nil {
return err
}
to, err = fs.underlyingPath(to)
if err != nil {
return err
}
return fs.underlying.Rename(from, to)
}
func (fs *ChrootHelper) Remove(path string) error {
fullpath, err := fs.underlyingPath(path)
if err != nil {
return err
}
return fs.underlying.Remove(fullpath)
}
func (fs *ChrootHelper) Join(elem ...string) string {
return fs.underlying.Join(elem...)
}
func (fs *ChrootHelper) TempFile(dir, prefix string) (billy.File, error) {
fullpath, err := fs.underlyingPath(dir)
if err != nil {
return nil, err
}
f, err := fs.underlying.(billy.TempFile).TempFile(fullpath, prefix)
if err != nil {
return nil, err
}
return newFile(fs, f, fs.Join(dir, filepath.Base(f.Name()))), nil
}
func (fs *ChrootHelper) ReadDir(path string) ([]os.FileInfo, error) {
fullpath, err := fs.underlyingPath(path)
if err != nil {
return nil, err
}
return fs.underlying.(billy.Dir).ReadDir(fullpath)
}
func (fs *ChrootHelper) MkdirAll(filename string, perm os.FileMode) error {
fullpath, err := fs.underlyingPath(filename)
if err != nil {
return err
}
return fs.underlying.(billy.Dir).MkdirAll(fullpath, perm)
}
func (fs *ChrootHelper) Lstat(filename string) (os.FileInfo, error) {
fullpath, err := fs.underlyingPath(filename)
if err != nil {
return nil, err
}
return fs.underlying.(billy.Symlink).Lstat(fullpath)
}
func (fs *ChrootHelper) Symlink(target, link string) error {
target = filepath.FromSlash(target)
// only rewrite target if it's already absolute
if filepath.IsAbs(target) || strings.HasPrefix(target, string(filepath.Separator)) {
target = fs.Join(fs.Root(), target)
target = filepath.Clean(filepath.FromSlash(target))
}
link, err := fs.underlyingPath(link)
if err != nil {
return err
}
return fs.underlying.(billy.Symlink).Symlink(target, link)
}
func (fs *ChrootHelper) Readlink(link string) (string, error) {
fullpath, err := fs.underlyingPath(link)
if err != nil {
return "", err
}
target, err := fs.underlying.(billy.Symlink).Readlink(fullpath)
if err != nil {
return "", err
}
if !filepath.IsAbs(target) && !strings.HasPrefix(target, string(filepath.Separator)) {
return target, nil
}
target, err = filepath.Rel(fs.base, target)
if err != nil {
return "", err
}
return string(os.PathSeparator) + target, nil
}
func (fs *ChrootHelper) Chroot(path string) (billy.Filesystem, error) {
fullpath, err := fs.underlyingPath(path)
if err != nil {
return nil, err
}
return New(fs.underlying, fullpath), nil
}
func (fs *ChrootHelper) Root() string {
return fs.base
}
func (fs *ChrootHelper) Underlying() billy.Basic {
return fs.underlying
}
// Capabilities implements the Capable interface.
func (fs *ChrootHelper) Capabilities() billy.Capability {
return billy.Capabilities(fs.underlying)
}
type file struct {
billy.File
name string
}
func newFile(fs billy.Filesystem, f billy.File, filename string) billy.File {
filename = fs.Join(fs.Root(), filename)
filename, _ = filepath.Rel(fs.Root(), filename)
return &file{
File: f,
name: filename,
}
}
func (f *file) Name() string {
return f.name
}
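A hedged usage sketch for the chroot helper follows. The base path /tmp/billy-demo and the subdirectory name "logs" are invented for the example, and the boundary check shown assumes a Unix-like path separator; the calls themselves (osfs.New, Chroot, billy.ErrCrossedBoundary) come from the code in this vendor tree.

package main

import (
	"fmt"

	"gopkg.in/src-d/go-billy.v4"
	"gopkg.in/src-d/go-billy.v4/osfs"
)

func main() {
	// osfs.New already wraps the OS filesystem in the chroot helper, so all
	// paths are resolved relative to this base directory (example path).
	root := osfs.New("/tmp/billy-demo")

	// Chroot narrows the view to a subdirectory without touching the disk.
	sub, err := root.Chroot("logs")
	if err != nil {
		panic(err)
	}
	fmt.Println("new root:", sub.Root())

	// Paths that climb above the new root are rejected instead of escaping it.
	if _, err := sub.Create("../escape.txt"); err == billy.ErrCrossedBoundary {
		fmt.Println("escape attempt blocked:", err)
	}
}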

105
vendor/gopkg.in/src-d/go-billy.v4/helper/polyfill/polyfill.go generated vendored Normal file
View File

@ -0,0 +1,105 @@
package polyfill
import (
"os"
"path/filepath"
"gopkg.in/src-d/go-billy.v4"
)
// Polyfill is a helper that implements, on top of a billy.Basic, all the
// methods missing to satisfy billy.Filesystem.
type Polyfill struct {
billy.Basic
c capabilities
}
type capabilities struct{ tempfile, dir, symlink, chroot bool }
// New creates a new filesystem wrapping up 'fs'. It intercepts all the calls
// made and returns billy.ErrNotSupported for any optional billy interface
// that fs does not implement.
func New(fs billy.Basic) billy.Filesystem {
if original, ok := fs.(billy.Filesystem); ok {
return original
}
h := &Polyfill{Basic: fs}
_, h.c.tempfile = h.Basic.(billy.TempFile)
_, h.c.dir = h.Basic.(billy.Dir)
_, h.c.symlink = h.Basic.(billy.Symlink)
_, h.c.chroot = h.Basic.(billy.Chroot)
return h
}
func (h *Polyfill) TempFile(dir, prefix string) (billy.File, error) {
if !h.c.tempfile {
return nil, billy.ErrNotSupported
}
return h.Basic.(billy.TempFile).TempFile(dir, prefix)
}
func (h *Polyfill) ReadDir(path string) ([]os.FileInfo, error) {
if !h.c.dir {
return nil, billy.ErrNotSupported
}
return h.Basic.(billy.Dir).ReadDir(path)
}
func (h *Polyfill) MkdirAll(filename string, perm os.FileMode) error {
if !h.c.dir {
return billy.ErrNotSupported
}
return h.Basic.(billy.Dir).MkdirAll(filename, perm)
}
func (h *Polyfill) Symlink(target, link string) error {
if !h.c.symlink {
return billy.ErrNotSupported
}
return h.Basic.(billy.Symlink).Symlink(target, link)
}
func (h *Polyfill) Readlink(link string) (string, error) {
if !h.c.symlink {
return "", billy.ErrNotSupported
}
return h.Basic.(billy.Symlink).Readlink(link)
}
func (h *Polyfill) Lstat(path string) (os.FileInfo, error) {
if !h.c.symlink {
return nil, billy.ErrNotSupported
}
return h.Basic.(billy.Symlink).Lstat(path)
}
func (h *Polyfill) Chroot(path string) (billy.Filesystem, error) {
if !h.c.chroot {
return nil, billy.ErrNotSupported
}
return h.Basic.(billy.Chroot).Chroot(path)
}
func (h *Polyfill) Root() string {
if !h.c.chroot {
return string(filepath.Separator)
}
return h.Basic.(billy.Chroot).Root()
}
func (h *Polyfill) Underlying() billy.Basic {
return h.Basic
}
// Capabilities implements the Capable interface.
func (h *Polyfill) Capabilities() billy.Capability {
return billy.Capabilities(h.Basic)
}
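The polyfill helper is easiest to see with a value that only exposes billy.Basic. The sketch below hides the optional interfaces of an osfs filesystem behind an embedded interface; the basicOnly type and the path are illustrative assumptions, while polyfill.New and billy.ErrNotSupported are taken from the vendored code above.

package main

import (
	"fmt"

	"gopkg.in/src-d/go-billy.v4"
	"gopkg.in/src-d/go-billy.v4/helper/polyfill"
	"gopkg.in/src-d/go-billy.v4/osfs"
)

// basicOnly promotes only the methods declared on billy.Basic, hiding the
// optional Dir, Symlink, TempFile and Chroot interfaces of the wrapped value.
type basicOnly struct {
	billy.Basic
}

func main() {
	limited := basicOnly{Basic: osfs.New("/tmp/billy-demo")} // example path

	// polyfill.New upgrades the value to a full billy.Filesystem; calls that
	// the wrapped value cannot serve fail with billy.ErrNotSupported.
	fs := polyfill.New(limited)

	if _, err := fs.ReadDir("."); err == billy.ErrNotSupported {
		fmt.Println("ReadDir is not available on this wrapper:", err)
	}
}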

139
vendor/gopkg.in/src-d/go-billy.v4/osfs/os.go generated vendored Normal file
View File

@ -0,0 +1,139 @@
// Package osfs provides a billy filesystem for the OS.
package osfs // import "gopkg.in/src-d/go-billy.v4/osfs"
import (
"io/ioutil"
"os"
"path/filepath"
"sync"
"gopkg.in/src-d/go-billy.v4"
"gopkg.in/src-d/go-billy.v4/helper/chroot"
)
const (
defaultDirectoryMode = 0755
defaultCreateMode = 0666
)
// OS is a filesystem backed by the os package of the standard library.
type OS struct{}
// New returns a new OS filesystem.
func New(baseDir string) billy.Filesystem {
return chroot.New(&OS{}, baseDir)
}
func (fs *OS) Create(filename string) (billy.File, error) {
return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, defaultCreateMode)
}
func (fs *OS) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) {
if flag&os.O_CREATE != 0 {
if err := fs.createDir(filename); err != nil {
return nil, err
}
}
f, err := os.OpenFile(filename, flag, perm)
if err != nil {
return nil, err
}
return &file{File: f}, err
}
func (fs *OS) createDir(fullpath string) error {
dir := filepath.Dir(fullpath)
if dir != "." {
if err := os.MkdirAll(dir, defaultDirectoryMode); err != nil {
return err
}
}
return nil
}
func (fs *OS) ReadDir(path string) ([]os.FileInfo, error) {
l, err := ioutil.ReadDir(path)
if err != nil {
return nil, err
}
var s = make([]os.FileInfo, len(l))
for i, f := range l {
s[i] = f
}
return s, nil
}
func (fs *OS) Rename(from, to string) error {
if err := fs.createDir(to); err != nil {
return err
}
return os.Rename(from, to)
}
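// MkdirAll creates path along with any missing parents. Note that the perm
// argument is not used here; defaultDirectoryMode is applied instead.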
func (fs *OS) MkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(path, defaultDirectoryMode)
}
func (fs *OS) Open(filename string) (billy.File, error) {
return fs.OpenFile(filename, os.O_RDONLY, 0)
}
func (fs *OS) Stat(filename string) (os.FileInfo, error) {
return os.Stat(filename)
}
func (fs *OS) Remove(filename string) error {
return os.Remove(filename)
}
func (fs *OS) TempFile(dir, prefix string) (billy.File, error) {
if err := fs.createDir(dir + string(os.PathSeparator)); err != nil {
return nil, err
}
f, err := ioutil.TempFile(dir, prefix)
if err != nil {
return nil, err
}
return &file{File: f}, nil
}
func (fs *OS) Join(elem ...string) string {
return filepath.Join(elem...)
}
func (fs *OS) RemoveAll(path string) error {
return os.RemoveAll(filepath.Clean(path))
}
func (fs *OS) Lstat(filename string) (os.FileInfo, error) {
return os.Lstat(filepath.Clean(filename))
}
func (fs *OS) Symlink(target, link string) error {
if err := fs.createDir(link); err != nil {
return err
}
return os.Symlink(target, link)
}
func (fs *OS) Readlink(link string) (string, error) {
return os.Readlink(link)
}
// Capabilities implements the Capable interface.
func (fs *OS) Capabilities() billy.Capability {
return billy.DefaultCapabilities
}
// file is a wrapper for an os.File which adds support for file locking.
type file struct {
*os.File
m sync.Mutex
}
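For orientation, here is a short sketch of the osfs filesystem in use; the directory and file names are invented for the example, and error handling is reduced to panics to keep it brief.

package main

import (
	"fmt"
	"io/ioutil"

	"gopkg.in/src-d/go-billy.v4/osfs"
)

func main() {
	fs := osfs.New("/tmp/billy-demo") // example base directory

	// Create makes missing parent directories on demand (see createDir above).
	f, err := fs.Create("notes/hello.txt")
	if err != nil {
		panic(err)
	}
	if _, err := f.Write([]byte("hello billy\n")); err != nil {
		panic(err)
	}
	if err := f.Close(); err != nil {
		panic(err)
	}

	// Reading goes through the same storage-agnostic interface.
	r, err := fs.Open("notes/hello.txt")
	if err != nil {
		panic(err)
	}
	defer r.Close()

	data, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", data)
}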

21
vendor/gopkg.in/src-d/go-billy.v4/osfs/os_posix.go generated vendored Normal file
View File

@ -0,0 +1,21 @@
// +build !windows

package osfs
import (
"syscall"
)
func (f *file) Lock() error {
f.m.Lock()
defer f.m.Unlock()
return syscall.Flock(int(f.File.Fd()), syscall.LOCK_EX)
}
func (f *file) Unlock() error {
f.m.Lock()
defer f.m.Unlock()
return syscall.Flock(int(f.File.Fd()), syscall.LOCK_UN)
}
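Finally, a small sketch of the advisory locking exposed by billy.File; the file name and path are arbitrary examples, and the behaviour described applies to the non-Windows build shown above, where Lock is implemented with flock.

package main

import (
	"fmt"

	"gopkg.in/src-d/go-billy.v4/osfs"
)

func main() {
	fs := osfs.New("/tmp/billy-demo") // example path

	f, err := fs.Create("state.db") // arbitrary file name
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// On non-Windows builds Lock takes an exclusive flock on the underlying
	// descriptor, guarding against other cooperating processes.
	if err := f.Lock(); err != nil {
		panic(err)
	}
	defer f.Unlock()

	if _, err := f.Write([]byte("exclusive update\n")); err != nil {
		panic(err)
	}
	fmt.Println("wrote while holding the advisory lock")
}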

Some files were not shown because too many files have changed in this diff.