
Use git log name-status in get last commit (#16059)

* Improve get last commit using git log --name-status

git log --name-status -c provides information about the diff between a
commit and its parents. Using this, and adjusting the algorithm to take
the first change to a path, allows commit info to be generated much
faster.

There is a subtle change in the generated results, but it makes them
match the results from elsewhere more closely.
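For illustration, a minimal sketch (mine, not part of the change) of the invocation this PR builds; the argument list mirrors the new LogNameStatusRepo function further down in this diff:

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Sketch only: mirrors the argument list assembled in LogNameStatusRepo.
	out, err := exec.Command("git", "log", "--name-status", "-c",
		"--format=commit%x00%H %P%x00", "--parents", "--no-renames", "-t", "-z",
		"HEAD", "--").Output()
	if err != nil {
		fmt.Println(err)
		return
	}
	// Because of -z the stream is NUL-separated; per commit it looks like:
	//   commit\x00<commit-sha> <parent-shas>\x00
	//   \n<status>\x00<path>\x00<status>\x00<path>\x00 ...
	// The adjusted algorithm records, for each path of interest, the first
	// commit in this (newest-first) stream that touches it.
	fmt.Printf("read %d bytes of log output\n", len(out))
}
```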

Signed-off-by: Andrew Thornton <art27@cantab.net>

Co-authored-by: 6543 <6543@obermui.de>
Co-authored-by: techknowlogick <techknowlogick@gitea.io>
Co-authored-by: Lauris BH <lauris@nix.lv>
zeripath 2021-06-20 23:00:46 +01:00 committed by GitHub
parent 8fa3bbc424
commit 23358bc55d
40 changed files with 2540 additions and 297 deletions

go.mod

@@ -30,6 +30,8 @@ require (
github.com/couchbase/goutils v0.0.0-20210118111533-e33d3ffb5401 // indirect
github.com/denisenkom/go-mssqldb v0.10.0
github.com/dgrijalva/jwt-go v3.2.0+incompatible
+github.com/djherbis/buffer v1.2.0
+github.com/djherbis/nio/v3 v3.0.1
github.com/dustin/go-humanize v1.0.0
github.com/editorconfig/editorconfig-core-go/v2 v2.4.2
github.com/emirpasic/gods v1.12.0

go.sum

@@ -244,6 +244,11 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/djherbis/buffer v1.1.0/go.mod h1:VwN8VdFkMY0DCALdY8o00d3IZ6Amz/UNVMWcSaJT44o=
+github.com/djherbis/buffer v1.2.0 h1:PH5Dd2ss0C7CRRhQCZ2u7MssF+No9ide8Ye71nPHcrQ=
+github.com/djherbis/buffer v1.2.0/go.mod h1:fjnebbZjCUpPinBRD+TDwXSOeNQ7fPQWLfGQqiAiUyE=
+github.com/djherbis/nio/v3 v3.0.1 h1:6wxhnuppteMa6RHA4L81Dq7ThkZH8SwnDzXDYy95vB4=
+github.com/djherbis/nio/v3 v3.0.1/go.mod h1:Ng4h80pbZFMla1yKzm61cF0tqqilXZYrogmWgZxOcmg=
github.com/dlclark/regexp2 v1.1.6/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
github.com/dlclark/regexp2 v1.4.0 h1:F1rxgk7p4uKjwIQxBs9oAXe5CqrXlCduYEJvrF4u93E=

modules/git/batch_reader.go

@@ -11,6 +11,9 @@ import (
"math"
"strconv"
"strings"
+
+"github.com/djherbis/buffer"
+"github.com/djherbis/nio/v3"
)

// WriteCloserError wraps an io.WriteCloser with an additional CloseWithError function
@@ -42,7 +45,7 @@ func CatFileBatchCheck(repoPath string) (WriteCloserError, *bufio.Reader, func()
}
}()
-// For simplicities sake we'll us a buffered reader to read from the cat-file --batch
+// For simplicities sake we'll use a buffered reader to read from the cat-file --batch-check
batchReader := bufio.NewReader(batchStdoutReader)
return batchStdinWriter, batchReader, cancel
@@ -53,7 +56,7 @@ func CatFileBatch(repoPath string) (WriteCloserError, *bufio.Reader, func()) {
// We often want to feed the commits in order into cat-file --batch, followed by their trees and sub trees as necessary.
// so let's create a batch stdin and stdout
batchStdinReader, batchStdinWriter := io.Pipe()
-batchStdoutReader, batchStdoutWriter := io.Pipe()
+batchStdoutReader, batchStdoutWriter := nio.Pipe(buffer.New(32 * 1024))
cancel := func() {
_ = batchStdinReader.Close()
_ = batchStdinWriter.Close()
@@ -74,7 +77,7 @@ func CatFileBatch(repoPath string) (WriteCloserError, *bufio.Reader, func()) {
}()
// For simplicities sake we'll us a buffered reader to read from the cat-file --batch
-batchReader := bufio.NewReader(batchStdoutReader)
+batchReader := bufio.NewReaderSize(batchStdoutReader, 32*1024)
return batchStdinWriter, batchReader, cancel
}
@@ -84,22 +87,31 @@ func CatFileBatch(repoPath string) (WriteCloserError, *bufio.Reader, func()) {
// <sha> SP <type> SP <size> LF
// sha is a 40byte not 20byte here
func ReadBatchLine(rd *bufio.Reader) (sha []byte, typ string, size int64, err error) {
-sha, err = rd.ReadBytes(' ')
-if err != nil {
-return
-}
-sha = sha[:len(sha)-1]
typ, err = rd.ReadString('\n')
if err != nil {
return
}
+if len(typ) == 1 {
+typ, err = rd.ReadString('\n')
+if err != nil {
+return
+}
+}
+idx := strings.IndexByte(typ, ' ')
+if idx < 0 {
+log("missing space typ: %s", typ)
+err = ErrNotExist{ID: string(sha)}
+return
+}
+sha = []byte(typ[:idx])
+typ = typ[idx+1:]
-idx := strings.Index(typ, " ")
+idx = strings.IndexByte(typ, ' ')
if idx < 0 {
err = ErrNotExist{ID: string(sha)}
return
}
sizeStr := typ[idx+1 : len(typ)-1]
typ = typ[:idx]
@@ -130,7 +142,7 @@ headerLoop:
}
// Discard the rest of the tag
-discard := size - n
+discard := size - n + 1
for discard > math.MaxInt32 {
_, err := rd.Discard(math.MaxInt32)
if err != nil {
@@ -200,57 +212,6 @@ func To40ByteSHA(sha, out []byte) []byte {
return out
}
-// ParseTreeLineSkipMode reads an entry from a tree in a cat-file --batch stream
-// This simply skips the mode - saving a substantial amount of time and carefully avoids allocations - except where fnameBuf is too small.
-// It is recommended therefore to pass in an fnameBuf large enough to avoid almost all allocations
-//
-// Each line is composed of:
-// <mode-in-ascii-dropping-initial-zeros> SP <fname> NUL <20-byte SHA>
-//
-// We don't attempt to convert the 20-byte SHA to 40-byte SHA to save a lot of time
-func ParseTreeLineSkipMode(rd *bufio.Reader, fnameBuf, shaBuf []byte) (fname, sha []byte, n int, err error) {
-var readBytes []byte
-// Skip the Mode
-readBytes, err = rd.ReadSlice(' ') // NB: DOES NOT ALLOCATE SIMPLY RETURNS SLICE WITHIN READER BUFFER
-if err != nil {
-return
-}
-n += len(readBytes)
-// Deal with the fname
-readBytes, err = rd.ReadSlice('\x00')
-copy(fnameBuf, readBytes)
-if len(fnameBuf) > len(readBytes) {
-fnameBuf = fnameBuf[:len(readBytes)] // cut the buf the correct size
-} else {
-fnameBuf = append(fnameBuf, readBytes[len(fnameBuf):]...) // extend the buf and copy in the missing bits
-}
-for err == bufio.ErrBufferFull { // Then we need to read more
-readBytes, err = rd.ReadSlice('\x00')
-fnameBuf = append(fnameBuf, readBytes...) // there is little point attempting to avoid allocations here so just extend
-}
-n += len(fnameBuf)
-if err != nil {
-return
-}
-fnameBuf = fnameBuf[:len(fnameBuf)-1] // Drop the terminal NUL
-fname = fnameBuf // set the returnable fname to the slice
-// Now deal with the 20-byte SHA
-idx := 0
-for idx < 20 {
-read := 0
-read, err = rd.Read(shaBuf[idx:20])
-n += read
-if err != nil {
-return
-}
-idx += read
-}
-sha = shaBuf
-return
-}
// ParseTreeLine reads an entry from a tree in a cat-file --batch stream
// This carefully avoids allocations - except where fnameBuf is too small.
// It is recommended therefore to pass in an fnameBuf large enough to avoid almost all allocations
@@ -262,23 +223,31 @@ func ParseTreeLineSkipMode(rd *bufio.Reader, fnameBuf, shaBuf []byte) (fname, sh
func ParseTreeLine(rd *bufio.Reader, modeBuf, fnameBuf, shaBuf []byte) (mode, fname, sha []byte, n int, err error) {
var readBytes []byte
-// Read the Mode
-readBytes, err = rd.ReadSlice(' ')
+// Read the Mode & fname
+readBytes, err = rd.ReadSlice('\x00')
if err != nil {
return
}
-n += len(readBytes)
-copy(modeBuf, readBytes)
-if len(modeBuf) > len(readBytes) {
-modeBuf = modeBuf[:len(readBytes)]
-} else {
-modeBuf = append(modeBuf, readBytes[len(modeBuf):]...)
-}
-mode = modeBuf[:len(modeBuf)-1] // Drop the SP
+idx := bytes.IndexByte(readBytes, ' ')
+if idx < 0 {
+log("missing space in readBytes ParseTreeLine: %s", readBytes)
+err = &ErrNotExist{}
+return
+}
+n += idx + 1
+copy(modeBuf, readBytes[:idx])
+if len(modeBuf) >= idx {
+modeBuf = modeBuf[:idx]
+} else {
+modeBuf = append(modeBuf, readBytes[len(modeBuf):idx]...)
+}
+mode = modeBuf
+readBytes = readBytes[idx+1:]
// Deal with the fname
-readBytes, err = rd.ReadSlice('\x00')
copy(fnameBuf, readBytes)
if len(fnameBuf) > len(readBytes) {
fnameBuf = fnameBuf[:len(readBytes)]
@@ -297,7 +266,7 @@ func ParseTreeLine(rd *bufio.Reader, modeBuf, fnameBuf, shaBuf []byte) (mode, fn
fname = fnameBuf
// Deal with the 20-byte SHA
-idx := 0
+idx = 0
for idx < 20 {
read := 0
read, err = rd.Read(shaBuf[idx:20])

modules/git/commit_info_nogogit.go

@@ -7,15 +7,11 @@
package git
import (
-"bufio"
-"bytes"
"context"
"fmt"
"io"
-"math"
"path"
"sort"
-"strings"
)

// GetCommitsInfo gets information of all commits that are corresponding to these entries
@@ -43,21 +39,16 @@ func (tes Entries) GetCommitsInfo(ctx context.Context, commit *Commit, treePath
return nil, nil, err
}
-for i, found := range commits {
-if err := cache.Put(commit.ID.String(), path.Join(treePath, unHitPaths[i]), found.ID.String()); err != nil {
+for pth, found := range commits {
+if err := cache.Put(commit.ID.String(), path.Join(treePath, pth), found.ID.String()); err != nil {
return nil, nil, err
}
-revs[unHitPaths[i]] = found
+revs[pth] = found
}
}
} else {
sort.Strings(entryPaths)
-revs = map[string]*Commit{}
-var foundCommits []*Commit
-foundCommits, err = GetLastCommitForPaths(ctx, commit, treePath, entryPaths)
-for i, found := range foundCommits {
-revs[entryPaths[i]] = found
-}
+revs, err = GetLastCommitForPaths(ctx, commit, treePath, entryPaths)
}
if err != nil {
return nil, nil, err
@@ -86,6 +77,8 @@ func (tes Entries) GetCommitsInfo(ctx context.Context, commit *Commit, treePath
subModuleFile := NewSubModuleFile(entryCommit, subModuleURL, entry.ID.String())
commitsInfo[i].SubModuleFile = subModuleFile
}
+} else {
+log("missing commit for %s", entry.Name())
}
}
@@ -125,220 +118,24 @@ func getLastCommitForPathsByCache(ctx context.Context, commitID, treePath string
}

// GetLastCommitForPaths returns last commit information
-func GetLastCommitForPaths(ctx context.Context, commit *Commit, treePath string, paths []string) ([]*Commit, error) {
+func GetLastCommitForPaths(ctx context.Context, commit *Commit, treePath string, paths []string) (map[string]*Commit, error) {
// We read backwards from the commit to obtain all of the commits
-// We'll do this by using rev-list to provide us with parent commits in order
-revListReader, revListWriter := io.Pipe()
-defer func() {
-_ = revListWriter.Close()
-_ = revListReader.Close()
-}()
-go func() {
-stderr := strings.Builder{}
-err := NewCommand("rev-list", "--format=%T", commit.ID.String()).SetParentContext(ctx).RunInDirPipeline(commit.repo.Path, revListWriter, &stderr)
-if err != nil {
-_ = revListWriter.CloseWithError(ConcatenateError(err, (&stderr).String()))
-} else {
-_ = revListWriter.Close()
-}
-}()
+revs, err := WalkGitLog(ctx, commit.repo, commit, treePath, paths...)
+if err != nil {
+return nil, err
+}
batchStdinWriter, batchReader, cancel := commit.repo.CatFileBatch()
defer cancel()
-mapsize := 4096
-if len(paths) > mapsize {
-mapsize = len(paths)
-}
-path2idx := make(map[string]int, mapsize)
-for i, path := range paths {
-path2idx[path] = i
-}
-fnameBuf := make([]byte, 4096)
-modeBuf := make([]byte, 40)
-allShaBuf := make([]byte, (len(paths)+1)*20)
-shaBuf := make([]byte, 20)
-tmpTreeID := make([]byte, 40)
-// commits is the returnable commits matching the paths provided
-commits := make([]string, len(paths))
-// ids are the blob/tree ids for the paths
-ids := make([][]byte, len(paths))
-// We'll use a scanner for the revList because it's simpler than a bufio.Reader
-scan := bufio.NewScanner(revListReader)
-revListLoop:
-for scan.Scan() {
-// Get the next parent commit ID
-commitID := scan.Text()
-if !scan.Scan() {
-break revListLoop
-}
-commitID = commitID[7:]
-rootTreeID := scan.Text()
-// push the tree to the cat-file --batch process
-_, err := batchStdinWriter.Write([]byte(rootTreeID + "\n"))
-if err != nil {
-return nil, err
-}
-currentPath := ""
-// OK if the target tree path is "" and the "" is in the paths just set this now
-if treePath == "" && paths[0] == "" {
-// If this is the first time we see this set the id appropriate for this paths to this tree and set the last commit to curCommit
-if len(ids[0]) == 0 {
-ids[0] = []byte(rootTreeID)
-commits[0] = string(commitID)
-} else if bytes.Equal(ids[0], []byte(rootTreeID)) {
-commits[0] = string(commitID)
-}
-}
-treeReadingLoop:
-for {
-select {
-case <-ctx.Done():
-return nil, ctx.Err()
-default:
-}
-_, _, size, err := ReadBatchLine(batchReader)
-if err != nil {
-return nil, err
-}
-// Handle trees
-// n is counter for file position in the tree file
-var n int64
-// Two options: currentPath is the targetTreepath
-if treePath == currentPath {
-// We are in the right directory
-// Parse each tree line in turn. (don't care about mode here.)
-for n < size {
-fname, sha, count, err := ParseTreeLineSkipMode(batchReader, fnameBuf, shaBuf)
-shaBuf = sha
-if err != nil {
-return nil, err
-}
-n += int64(count)
-idx, ok := path2idx[string(fname)]
-if ok {
-// Now if this is the first time round set the initial Blob(ish) SHA ID and the commit
-if len(ids[idx]) == 0 {
-copy(allShaBuf[20*(idx+1):20*(idx+2)], shaBuf)
-ids[idx] = allShaBuf[20*(idx+1) : 20*(idx+2)]
-commits[idx] = string(commitID)
-} else if bytes.Equal(ids[idx], shaBuf) {
-commits[idx] = string(commitID)
-}
-}
-// FIXME: is there any order to the way strings are emitted from cat-file?
-// if there is - then we could skip once we've passed all of our data
-}
-if _, err := batchReader.Discard(1); err != nil {
-return nil, err
-}
-break treeReadingLoop
-}
-var treeID []byte
-// We're in the wrong directory
-// Find target directory in this directory
-idx := len(currentPath)
-if idx > 0 {
-idx++
-}
-target := strings.SplitN(treePath[idx:], "/", 2)[0]
-for n < size {
-// Read each tree entry in turn
-mode, fname, sha, count, err := ParseTreeLine(batchReader, modeBuf, fnameBuf, shaBuf)
-if err != nil {
-return nil, err
-}
-n += int64(count)
-// if we have found the target directory
-if bytes.Equal(fname, []byte(target)) && bytes.Equal(mode, []byte("40000")) {
-copy(tmpTreeID, sha)
-treeID = tmpTreeID
-break
-}
-}
-if n < size {
-// Discard any remaining entries in the current tree
-discard := size - n
-for discard > math.MaxInt32 {
-_, err := batchReader.Discard(math.MaxInt32)
-if err != nil {
-return nil, err
-}
-discard -= math.MaxInt32
-}
-_, err := batchReader.Discard(int(discard))
-if err != nil {
-return nil, err
-}
-}
-if _, err := batchReader.Discard(1); err != nil {
-return nil, err
-}
-// if we haven't found a treeID for the target directory our search is over
-if len(treeID) == 0 {
-break treeReadingLoop
-}
-// add the target to the current path
-if idx > 0 {
-currentPath += "/"
-}
-currentPath += target
-// if we've now found the current path check its sha id and commit status
-if treePath == currentPath && paths[0] == "" {
-if len(ids[0]) == 0 {
-copy(allShaBuf[0:20], treeID)
-ids[0] = allShaBuf[0:20]
-commits[0] = string(commitID)
-} else if bytes.Equal(ids[0], treeID) {
-commits[0] = string(commitID)
-}
-}
-treeID = To40ByteSHA(treeID, treeID)
-_, err = batchStdinWriter.Write(treeID)
-if err != nil {
-return nil, err
-}
-_, err = batchStdinWriter.Write([]byte("\n"))
-if err != nil {
-return nil, err
-}
-}
-}
-if scan.Err() != nil {
-return nil, scan.Err()
-}
-commitsMap := make(map[string]*Commit, len(commits))
+commitsMap := map[string]*Commit{}
commitsMap[commit.ID.String()] = commit
-commitCommits := make([]*Commit, len(commits))
-for i, commitID := range commits {
+commitCommits := map[string]*Commit{}
+for path, commitID := range revs {
c, ok := commitsMap[commitID]
if ok {
-commitCommits[i] = c
+commitCommits[path] = c
continue
}
@@ -364,8 +161,8 @@ revListLoop:
if _, err := batchReader.Discard(1); err != nil {
return nil, err
}
-commitCommits[i] = c
+commitCommits[path] = c
}
-return commitCommits, scan.Err()
+return commitCommits, nil
}

modules/git/last_commit_cache_nogogit.go

@@ -88,9 +88,8 @@ func (c *LastCommitCache) recursiveCache(ctx context.Context, commit *Commit, tr
return err
}
-for i, entryCommit := range commits {
-entry := entryPaths[i]
-if err := c.Put(commit.ID.String(), path.Join(treePath, entryPaths[i]), entryCommit.ID.String()); err != nil {
+for entry, entryCommit := range commits {
+if err := c.Put(commit.ID.String(), path.Join(treePath, entry), entryCommit.ID.String()); err != nil {
return err
}
if entryMap[entry].IsDir() {

modules/git/log_name_status.go Normal file

@@ -0,0 +1,398 @@
// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package git
import (
"bufio"
"bytes"
"context"
"io"
"path"
"sort"
"strings"
"github.com/djherbis/buffer"
"github.com/djherbis/nio/v3"
)
// LogNameStatusRepo opens git log --name-status in the provided repo and returns a stdout reader and cancel function
func LogNameStatusRepo(repository, head, treepath string, paths ...string) (*bufio.Reader, func()) {
// The git log output is fed through a buffered pipe
// so that the parser can read it incrementally
stdoutReader, stdoutWriter := nio.Pipe(buffer.New(32 * 1024))
cancel := func() {
_ = stdoutReader.Close()
_ = stdoutWriter.Close()
}
args := make([]string, 0, 8+len(paths))
args = append(args, "log", "--name-status", "-c", "--format=commit%x00%H %P%x00", "--parents", "--no-renames", "-t", "-z", head, "--")
if len(paths) < 70 {
if treepath != "" {
args = append(args, treepath)
for _, pth := range paths {
if pth != "" {
args = append(args, path.Join(treepath, pth))
}
}
} else {
for _, pth := range paths {
if pth != "" {
args = append(args, pth)
}
}
}
} else if treepath != "" {
args = append(args, treepath)
}
go func() {
stderr := strings.Builder{}
err := NewCommand(args...).RunInDirFullPipeline(repository, stdoutWriter, &stderr, nil)
if err != nil {
_ = stdoutWriter.CloseWithError(ConcatenateError(err, (&stderr).String()))
} else {
_ = stdoutWriter.Close()
}
}()
// For simplicity's sake we'll use a buffered reader to read from the git log output
bufReader := bufio.NewReaderSize(stdoutReader, 32*1024)
return bufReader, cancel
}
// LogNameStatusRepoParser parses the git log --name-status output from LogNameStatusRepo
type LogNameStatusRepoParser struct {
treepath string
paths []string
next []byte
buffull bool
rd *bufio.Reader
cancel func()
}
// NewLogNameStatusRepoParser returns a new parser for a git log raw output
func NewLogNameStatusRepoParser(repository, head, treepath string, paths ...string) *LogNameStatusRepoParser {
rd, cancel := LogNameStatusRepo(repository, head, treepath, paths...)
return &LogNameStatusRepoParser{
treepath: treepath,
paths: paths,
rd: rd,
cancel: cancel,
}
}
// LogNameStatusCommitData represents a commit artefact from git log raw
type LogNameStatusCommitData struct {
CommitID string
ParentIDs []string
Paths []bool
}
// Next returns the next LogNameStatusCommitData
func (g *LogNameStatusRepoParser) Next(treepath string, paths2ids map[string]int, changed []bool, maxpathlen int) (*LogNameStatusCommitData, error) {
var err error
if g.next == nil || len(g.next) == 0 {
g.buffull = false
g.next, err = g.rd.ReadSlice('\x00')
if err != nil {
if err == bufio.ErrBufferFull {
g.buffull = true
} else if err == io.EOF {
return nil, nil
} else {
return nil, err
}
}
}
ret := LogNameStatusCommitData{}
if bytes.Equal(g.next, []byte("commit\000")) {
g.next, err = g.rd.ReadSlice('\x00')
if err != nil {
if err == bufio.ErrBufferFull {
g.buffull = true
} else if err == io.EOF {
return nil, nil
} else {
return nil, err
}
}
}
// Our "line" must look like: <commitid> SP (<parent> SP) * NUL
ret.CommitID = string(g.next[0:40])
parents := string(g.next[41:])
if g.buffull {
more, err := g.rd.ReadString('\x00')
if err != nil {
return nil, err
}
parents += more
}
parents = parents[:len(parents)-1]
ret.ParentIDs = strings.Split(parents, " ")
// now read the next "line"
g.buffull = false
g.next, err = g.rd.ReadSlice('\x00')
if err != nil {
if err == bufio.ErrBufferFull {
g.buffull = true
} else if err != io.EOF {
return nil, err
}
}
if err == io.EOF || !(g.next[0] == '\n' || g.next[0] == '\000') {
return &ret, nil
}
// Ok we have some changes.
// This line will look like: NL <fname> NUL
//
// Subsequent lines will not have the NL - so drop it here - g.buffull must also be false at this point.
if g.next[0] == '\n' {
g.next = g.next[1:]
} else {
g.buffull = false
g.next, err = g.rd.ReadSlice('\x00')
if err != nil {
if err == bufio.ErrBufferFull {
g.buffull = true
} else if err != io.EOF {
return nil, err
}
}
if g.next[0] == '\x00' {
g.buffull = false
g.next, err = g.rd.ReadSlice('\x00')
if err != nil {
if err == bufio.ErrBufferFull {
g.buffull = true
} else if err != io.EOF {
return nil, err
}
}
}
}
fnameBuf := make([]byte, 4096)
diffloop:
for {
if err == io.EOF || bytes.Equal(g.next, []byte("commit\000")) {
return &ret, nil
}
g.next, err = g.rd.ReadSlice('\x00')
if err != nil {
if err == bufio.ErrBufferFull {
g.buffull = true
} else if err == io.EOF {
return &ret, nil
} else {
return nil, err
}
}
copy(fnameBuf, g.next)
if len(fnameBuf) < len(g.next) {
fnameBuf = append(fnameBuf, g.next[len(fnameBuf):]...)
} else {
fnameBuf = fnameBuf[:len(g.next)]
}
if err != nil {
if err != bufio.ErrBufferFull {
return nil, err
}
more, err := g.rd.ReadBytes('\x00')
if err != nil {
return nil, err
}
fnameBuf = append(fnameBuf, more...)
}
// read the next line
g.buffull = false
g.next, err = g.rd.ReadSlice('\x00')
if err != nil {
if err == bufio.ErrBufferFull {
g.buffull = true
} else if err != io.EOF {
return nil, err
}
}
if treepath != "" {
if !bytes.HasPrefix(fnameBuf, []byte(treepath)) {
fnameBuf = fnameBuf[:cap(fnameBuf)]
continue diffloop
}
}
fnameBuf = fnameBuf[len(treepath) : len(fnameBuf)-1]
if len(fnameBuf) > maxpathlen {
fnameBuf = fnameBuf[:cap(fnameBuf)]
continue diffloop
}
if len(fnameBuf) > 0 {
if len(treepath) > 0 {
if fnameBuf[0] != '/' || bytes.IndexByte(fnameBuf[1:], '/') >= 0 {
fnameBuf = fnameBuf[:cap(fnameBuf)]
continue diffloop
}
fnameBuf = fnameBuf[1:]
} else if bytes.IndexByte(fnameBuf, '/') >= 0 {
fnameBuf = fnameBuf[:cap(fnameBuf)]
continue diffloop
}
}
idx, ok := paths2ids[string(fnameBuf)]
if !ok {
fnameBuf = fnameBuf[:cap(fnameBuf)]
continue diffloop
}
if ret.Paths == nil {
ret.Paths = changed
}
changed[idx] = true
}
}
// Close closes the parser
func (g *LogNameStatusRepoParser) Close() {
g.cancel()
}
// WalkGitLog walks the git log --name-status for the head commit in the provided treepath and files
func WalkGitLog(ctx context.Context, repo *Repository, head *Commit, treepath string, paths ...string) (map[string]string, error) {
tree, err := head.SubTree(treepath)
if err != nil {
return nil, err
}
entries, err := tree.ListEntries()
if err != nil {
return nil, err
}
if len(paths) == 0 {
paths = make([]string, 0, len(entries)+1)
paths = append(paths, "")
for _, entry := range entries {
paths = append(paths, entry.Name())
}
} else {
sort.Strings(paths)
if paths[0] != "" {
paths = append([]string{""}, paths...)
}
// remove duplicates
for i := len(paths) - 1; i > 0; i-- {
if paths[i] == paths[i-1] {
paths = append(paths[:i-1], paths[i:]...)
}
}
}
path2idx := map[string]int{}
maxpathlen := len(treepath)
for i := range paths {
path2idx[paths[i]] = i
pthlen := len(paths[i]) + len(treepath) + 1
if pthlen > maxpathlen {
maxpathlen = pthlen
}
}
g := NewLogNameStatusRepoParser(repo.Path, head.ID.String(), treepath, paths...)
defer g.Close()
results := make([]string, len(paths))
remaining := len(paths)
nextRestart := (len(paths) * 3) / 4
if nextRestart > 70 {
nextRestart = 70
}
lastEmptyParent := head.ID.String()
commitSinceLastEmptyParent := uint64(0)
commitSinceNextRestart := uint64(0)
parentRemaining := map[string]bool{}
changed := make([]bool, len(paths))
heaploop:
for {
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}
current, err := g.Next(treepath, path2idx, changed, maxpathlen)
if err != nil {
g.Close()
return nil, err
}
if current == nil {
break heaploop
}
delete(parentRemaining, current.CommitID)
if current.Paths != nil {
for i, found := range current.Paths {
if !found {
continue
}
changed[i] = false
if results[i] == "" {
results[i] = current.CommitID
delete(path2idx, paths[i])
remaining--
if results[0] == "" {
results[0] = current.CommitID
delete(path2idx, "")
remaining--
}
}
}
}
if remaining <= 0 {
break heaploop
}
commitSinceLastEmptyParent++
if len(parentRemaining) == 0 {
lastEmptyParent = current.CommitID
commitSinceLastEmptyParent = 0
}
if remaining <= nextRestart {
commitSinceNextRestart++
if 4*commitSinceNextRestart > 3*commitSinceLastEmptyParent {
g.Close()
remainingPaths := make([]string, 0, len(paths))
for i, pth := range paths {
if results[i] == "" {
remainingPaths = append(remainingPaths, pth)
}
}
g = NewLogNameStatusRepoParser(repo.Path, lastEmptyParent, treepath, remainingPaths...)
parentRemaining = map[string]bool{}
nextRestart = (remaining * 3) / 4
continue heaploop
}
}
for _, parent := range current.ParentIDs {
parentRemaining[parent] = true
}
}
g.Close()
resultsMap := map[string]string{}
for i, pth := range paths {
resultsMap[pth] = results[i]
}
return resultsMap, nil
}
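A minimal usage sketch (mine, not part of the change; ctx, repo and head come from the surrounding Gitea code, as in GetLastCommitForPaths above; assumes "fmt" is imported):

```go
// printLastCommits resolves, for every entry of the root tree, the ID of the
// last commit that touched it. Illustrative only.
func printLastCommits(ctx context.Context, repo *Repository, head *Commit) error {
	// With no explicit paths, WalkGitLog walks all entries of the tree at
	// treepath (here the repository root), plus "" for the tree itself.
	revs, err := WalkGitLog(ctx, repo, head, "")
	if err != nil {
		return err
	}
	for pth, commitID := range revs {
		fmt.Printf("%s -> %s\n", pth, commitID)
	}
	return nil
}
```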

modules/git/notes_nogogit.go

@@ -68,7 +68,7 @@ func GetNote(ctx context.Context, repo *Repository, commitID string, note *Note)
if err != nil {
return err
}
-note.Commit = lastCommits[0]
+note.Commit = lastCommits[path]
return nil
}

modules/git/pipeline/lfs_nogogit.go

@@ -116,6 +116,9 @@ func FindLFSFile(repo *git.Repository, hash git.SHA1) ([]*LFSResult, error) {
if err != nil {
return nil, err
}
+if _, err := batchReader.Discard(1); err != nil {
+return nil, err
+}
_, err := batchStdinWriter.Write([]byte(curCommit.Tree.ID.String() + "\n"))
if err != nil {
@@ -146,6 +149,9 @@ func FindLFSFile(repo *git.Repository, hash git.SHA1) ([]*LFSResult, error) {
paths = append(paths, curPath+string(fname)+"/")
}
}
+if _, err := batchReader.Discard(1); err != nil {
+return nil, err
+}
if len(trees) > 0 {
_, err := batchStdinWriter.Write(trees[len(trees)-1])
if err != nil {

modules/git/repo_language_stats_nogogit.go

@@ -49,6 +49,9 @@ func (repo *Repository) GetLanguageStats(commitID string) (map[string]int64, err
log("Unable to get commit for: %s. Err: %v", commitID, err)
return nil, err
}
+if _, err = batchReader.Discard(1); err != nil {
+return nil, err
+}
tree := commit.Tree

modules/indexer/code/bleve.go

@@ -216,6 +216,9 @@ func (b *BleveIndexer) addUpdate(batchWriter git.WriteCloserError, batchReader *
return nil
}
+if _, err = batchReader.Discard(1); err != nil {
+return err
+}
id := filenameIndexerID(repo.ID, update.Filename)
return batch.Index(id, &RepoIndexerData{
RepoID: repo.ID,

modules/indexer/code/elastic_search.go

@@ -215,6 +215,9 @@ func (b *ElasticSearchIndexer) addUpdate(batchWriter git.WriteCloserError, batch
return nil, nil
}
+if _, err = batchReader.Discard(1); err != nil {
+return nil, err
+}
id := filenameIndexerID(repo.ID, update.Filename)
return []elastic.BulkableRequest{

vendor/github.com/djherbis/buffer/.travis.yml generated vendored Normal file

@@ -0,0 +1,20 @@
language: go
go:
- tip
before_install:
- go get golang.org/x/lint/golint
- go get github.com/axw/gocov/gocov
- go get github.com/mattn/goveralls
- if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
script:
- '[ "${TRAVIS_PULL_REQUEST}" != "false" ] || $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN'
- $HOME/gopath/bin/golint ./...
- go vet
- go test -v ./...
notifications:
email:
on_success: never
on_failure: change
env:
global:
secure: X2uEipzLOL7IDFQgiJdKQvA7gWw746gmU4HoLr73Au+mDZnIaYfpM7pR0r9S9DY23obmflOBFytB9IIyr6Ganhs8KDd6osBS3JSu5ydZKhoHDshSZHxW6GdCiR0Ya85JZ2k/CzwuZ95FcCTztXG59D8VhAoM+8gNW6VLK2mL60Y=

vendor/github.com/djherbis/buffer/LICENSE.txt generated vendored Normal file

@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2015 Dustin H
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/djherbis/buffer/README.md generated vendored Normal file

@@ -0,0 +1,174 @@
Buffer
==========
[![GoDoc](https://godoc.org/github.com/djherbis/buffer?status.svg)](https://godoc.org/github.com/djherbis/buffer)
[![Release](https://img.shields.io/github/release/djherbis/buffer.svg)](https://github.com/djherbis/buffer/releases/latest)
[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg)](LICENSE.txt)
[![Build Status](https://travis-ci.org/djherbis/buffer.svg?branch=master)](https://travis-ci.org/djherbis/buffer)
[![Coverage Status](https://coveralls.io/repos/djherbis/buffer/badge.svg?branch=master)](https://coveralls.io/r/djherbis/buffer?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/djherbis/buffer)](https://goreportcard.com/report/github.com/djherbis/buffer)
Usage
------------
The following buffers provide simple unique behaviours which when composed can create complex buffering strategies. For use with github.com/djherbis/nio for Buffered io.Pipe and io.Copy implementations.
For example:
```go
import (
"github.com/djherbis/buffer"
"github.com/djherbis/nio"
"io/ioutil"
)
// Buffer 32KB to Memory, after that buffer to 100MB chunked files
buf := buffer.NewUnboundedBuffer(32*1024, 100*1024*1024)
nio.Copy(w, r, buf) // Reads from r, writes to buf, reads from buf writes to w (concurrently).
// Buffer 32KB to Memory, discard overflow
buf = buffer.NewSpill(32*1024, ioutil.Discard)
nio.Copy(w, r, buf)
```
Supported Buffers
------------
#### Bounded Buffers ####
Memory: Wrapper for bytes.Buffer
File: File-based buffering. The file never exceeds Cap() in length, no matter how many times its written/read from. It accomplishes this by "wrapping" around the fixed max-length file when the data gets too long but there is available freed space at the beginning of the file. The caller is responsible for closing and deleting the file when done.
```go
import (
"ioutil"
"os"
"github.com/djherbis/buffer"
)
// Create a File-based Buffer with max size 100MB
file, err := ioutil.TempFile("", "buffer")
if err != nil {
return err
}
defer os.Remove(file.Name())
defer file.Close()
buf := buffer.NewFile(100*1024*1024, file)
// A simpler way:
pool := buffer.NewFilePool(100*1024*1024, "") // "" -- use temp dir
buf, err := pool.Get() // allocate the buffer
if err != nil {
return err
}
defer pool.Put(buf) // close and remove the allocated file for the buffer
```
Multi: A fixed length linked-list of buffers. Each buffer reads from the next buffer so that all the buffered data is shifted upwards in the list when reading. Writes are always written to the first buffer in the list whose Len() < Cap().
```go
import (
"github.com/djherbis/buffer"
)
mem := buffer.New(32*1024)
file := buffer.NewFile(100*1024*1024, someFileObj) // you'll need to manage Open(), Close() and Delete someFileObj
// Buffer composed of 32KB of memory, and 100MB of file.
buf := buffer.NewMulti(mem, file)
```
#### Unbounded Buffers ####
Partition: A queue of buffers. Writes always go to the last buffer in the queue. If all buffers are full, a new buffer is "pushed" to the end of the queue (generated by a user-given function). Reads come from the first buffer, when the first buffer is emptied it is "popped" off the queue.
```go
import (
"github.com/djherbis/buffer"
)
// Create 32 KB sized-chunks of memory as needed to expand/contract the buffer size.
buf := buffer.NewPartition(buffer.NewMemPool(32*1024))
// Create 100 MB sized-chunks of files as needed to expand/contract the buffer size.
buf = buffer.NewPartition(buffer.NewFilePool(100*1024*1024, ""))
```
Ring: A single buffer which begins overwriting the oldest buffered data when it reaches its capacity.
```go
import (
"github.com/djherbis/buffer"
)
// Create a File-based Buffer with max size 100MB
file := buffer.NewFile(100*1024*1024, someFileObj) // you'll need to Open(), Close() and Delete someFileObj.
// If buffered data exceeds 100MB, overwrite oldest data as new data comes in
buf := buffer.NewRing(file) // requires BufferAt interface.
```
Spill: A single buffer which when full, writes the overflow to a given io.Writer.
-> Note that it will actually "spill" whenever there is an error while writing; this should only be a "full" error.
```go
import (
"github.com/djherbis/buffer"
"github.com/djherbis/nio"
"io/ioutil"
)
// Buffer 32KB to Memory, discard overflow
buf := buffer.NewSpill(32*1024, ioutil.Discard)
nio.Copy(w, r, buf)
```
#### Empty Buffer ####
Discard: Reads always return EOF, writes go to ioutil.Discard.
```go
import (
"github.com/djherbis/buffer"
)
// Reads will return io.EOF, writes will return success (nil error, full write) but no data was written.
buf := buffer.Discard
```
Custom Buffers
------------
Feel free to implement your own buffer, just meet the required interface (Buffer/BufferAt) and compose away!
```go
// Buffer Interface used by Multi and Partition
type Buffer interface {
Len() int64
Cap() int64
io.Reader
io.Writer
Reset()
}
// BufferAt interface used by Ring
type BufferAt interface {
Buffer
io.ReaderAt
io.WriterAt
}
```
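As an illustration (my sketch, not part of the package), a minimal custom Buffer satisfying the interface above, composed with the stock buffers:

```go
package main

import (
	"bytes"
	"io"

	"github.com/djherbis/buffer"
)

// capped is a toy Buffer: a bytes.Buffer with a hard capacity. It returns
// io.ErrShortWrite when full, so composites like Multi move on to the next buffer.
type capped struct {
	n int64
	b bytes.Buffer
}

func (c *capped) Len() int64                 { return int64(c.b.Len()) }
func (c *capped) Cap() int64                 { return c.n }
func (c *capped) Reset()                     { c.b.Reset() }
func (c *capped) Read(p []byte) (int, error) { return c.b.Read(p) }

func (c *capped) Write(p []byte) (int, error) {
	if gap := c.n - int64(c.b.Len()); int64(len(p)) > gap {
		n, _ := c.b.Write(p[:int(gap)])
		return n, io.ErrShortWrite
	}
	return c.b.Write(p)
}

func main() {
	// The first 8 bytes land in the capped buffer, the rest spill into memory.
	buf := buffer.NewMulti(&capped{n: 8}, buffer.New(32*1024))
	_, _ = buf.Write([]byte("hello, buffered world"))
}
```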
Installation
------------
```sh
go get github.com/djherbis/buffer
```

vendor/github.com/djherbis/buffer/buffer.go generated vendored Normal file

@@ -0,0 +1,48 @@
// Package buffer implements a series of Buffers which can be composed to implement complicated buffering strategies
package buffer
import (
"io"
"os"
)
// Buffer is used to Write() data which will be Read() later.
type Buffer interface {
Len() int64 // How much data is Buffered in bytes
Cap() int64 // How much data can be Buffered at once in bytes.
io.Reader // Read() will read from the top of the buffer [io.EOF if empty]
io.Writer // Write() will write to the end of the buffer [io.ErrShortWrite if not enough space]
Reset() // Truncates the buffer, Len() == 0.
}
// BufferAt is a buffer which supports io.ReaderAt and io.WriterAt
type BufferAt interface {
Buffer
io.ReaderAt
io.WriterAt
}
func len64(p []byte) int64 {
return int64(len(p))
}
// Gap returns buf.Cap() - buf.Len()
func Gap(buf Buffer) int64 {
return buf.Cap() - buf.Len()
}
// Full returns true iff buf.Len() == buf.Cap()
func Full(buf Buffer) bool {
return buf.Len() == buf.Cap()
}
// Empty returns true iff buf.Len() == 0
func Empty(buf Buffer) bool {
return buf.Len() == 0
}
// NewUnboundedBuffer returns a Buffer which buffers "mem" bytes to memory
// and then creates file's of size "file" to buffer above "mem" bytes.
func NewUnboundedBuffer(mem, file int64) Buffer {
return NewMulti(New(mem), NewPartition(NewFilePool(file, os.TempDir())))
}

vendor/github.com/djherbis/buffer/discard.go generated vendored Normal file

@@ -0,0 +1,36 @@
package buffer
import (
"encoding/gob"
"io"
"io/ioutil"
"math"
)
type discard struct{}
// Discard is a Buffer which writes to ioutil.Discard and read's return 0, io.EOF.
// All of its methods are concurrent safe.
var Discard Buffer = discard{}
func (buf discard) Len() int64 {
return 0
}
func (buf discard) Cap() int64 {
return math.MaxInt64
}
func (buf discard) Reset() {}
func (buf discard) Read(p []byte) (n int, err error) {
return 0, io.EOF
}
func (buf discard) Write(p []byte) (int, error) {
return ioutil.Discard.Write(p)
}
func init() {
gob.Register(&discard{})
}

vendor/github.com/djherbis/buffer/file.go generated vendored Normal file

@@ -0,0 +1,72 @@
package buffer
import (
"bytes"
"encoding/gob"
"fmt"
"io"
"os"
"path/filepath"
"github.com/djherbis/buffer/wrapio"
)
// File is used as the backing resource for the NewFile BufferAt.
type File interface {
Name() string
Stat() (fi os.FileInfo, err error)
io.ReaderAt
io.WriterAt
Close() error
}
type fileBuffer struct {
file File
*wrapio.Wrapper
}
// NewFile returns a new BufferAt backed by "file" with max-size N.
func NewFile(N int64, file File) BufferAt {
return &fileBuffer{
file: file,
Wrapper: wrapio.NewWrapper(file, 0, 0, N),
}
}
func init() {
gob.Register(&fileBuffer{})
}
func (buf *fileBuffer) MarshalBinary() ([]byte, error) {
fullpath, err := filepath.Abs(filepath.Dir(buf.file.Name()))
if err != nil {
return nil, err
}
base := filepath.Base(buf.file.Name())
buf.file.Close()
buffer := bytes.NewBuffer(nil)
fmt.Fprintln(buffer, filepath.Join(fullpath, base))
fmt.Fprintln(buffer, buf.Wrapper.N, buf.Wrapper.L, buf.Wrapper.O)
return buffer.Bytes(), nil
}
func (buf *fileBuffer) UnmarshalBinary(data []byte) error {
buffer := bytes.NewBuffer(data)
var filename string
var N, L, O int64
_, err := fmt.Fscanln(buffer, &filename)
if err != nil {
return err
}
file, err := os.Open(filename)
if err != nil {
return err
}
buf.file = file
_, err = fmt.Fscanln(buffer, &N, &L, &O)
buf.Wrapper = wrapio.NewWrapper(file, L, O, N)
return err
}

vendor/github.com/djherbis/buffer/go.mod generated vendored Normal file

@@ -0,0 +1,3 @@
module github.com/djherbis/buffer
go 1.13

vendor/github.com/djherbis/buffer/limio/limit.go generated vendored Normal file

@@ -0,0 +1,31 @@
package limio
import "io"
type limitedWriter struct {
W io.Writer
N int64
}
func (l *limitedWriter) Write(p []byte) (n int, err error) {
if l.N <= 0 {
return 0, io.ErrShortWrite
}
if int64(len(p)) > l.N {
p = p[0:l.N]
err = io.ErrShortWrite
}
n, er := l.W.Write(p)
if er != nil {
err = er
}
l.N -= int64(n)
return n, err
}
// LimitWriter works like io.LimitReader. It writes at most n bytes
// to the underlying Writer. It returns io.ErrShortWrite if more than n
// bytes are attempted to be written.
func LimitWriter(w io.Writer, n int64) io.Writer {
return &limitedWriter{W: w, N: n}
}

vendor/github.com/djherbis/buffer/list.go generated vendored Normal file

@@ -0,0 +1,47 @@
package buffer
import "math"
// List is a slice of Buffers, it's the backing for NewPartition
type List []Buffer
// Len is the sum of the Len()'s of the Buffers in the List.
func (l *List) Len() (n int64) {
for _, buffer := range *l {
if n > math.MaxInt64-buffer.Len() {
return math.MaxInt64
}
n += buffer.Len()
}
return n
}
// Cap is the sum of the Cap()'s of the Buffers in the List.
func (l *List) Cap() (n int64) {
for _, buffer := range *l {
if n > math.MaxInt64-buffer.Cap() {
return math.MaxInt64
}
n += buffer.Cap()
}
return n
}
// Reset calls Reset() on each of the Buffers in the list.
func (l *List) Reset() {
for _, buffer := range *l {
buffer.Reset()
}
}
// Push adds a Buffer to the end of the List
func (l *List) Push(b Buffer) {
*l = append(*l, b)
}
// Pop removes and returns a Buffer from the front of the List
func (l *List) Pop() (b Buffer) {
b = (*l)[0]
*l = (*l)[1:]
return b
}

vendor/github.com/djherbis/buffer/list_at.go generated vendored Normal file

@@ -0,0 +1,47 @@
package buffer
import "math"
// ListAt is a slice of BufferAt's, it's the backing for NewPartitionAt
type ListAt []BufferAt
// Len is the sum of the Len()'s of the BufferAt's in the list.
func (l *ListAt) Len() (n int64) {
for _, buffer := range *l {
if n > math.MaxInt64-buffer.Len() {
return math.MaxInt64
}
n += buffer.Len()
}
return n
}
// Cap is the sum of the Cap()'s of the BufferAt's in the list.
func (l *ListAt) Cap() (n int64) {
for _, buffer := range *l {
if n > math.MaxInt64-buffer.Cap() {
return math.MaxInt64
}
n += buffer.Cap()
}
return n
}
// Reset calls Reset() on each of the BufferAt's in the list.
func (l *ListAt) Reset() {
for _, buffer := range *l {
buffer.Reset()
}
}
// Push adds a BufferAt to the end of the list
func (l *ListAt) Push(b BufferAt) {
*l = append(*l, b)
}
// Pop removes and returns a BufferAt from the front of the list
func (l *ListAt) Pop() (b BufferAt) {
b = (*l)[0]
*l = (*l)[1:]
return b
}

vendor/github.com/djherbis/buffer/mem.go generated vendored Normal file

@@ -0,0 +1,82 @@
package buffer
import (
"bytes"
"encoding/gob"
"fmt"
"io"
"github.com/djherbis/buffer/limio"
)
type memory struct {
N int64
*bytes.Buffer
}
// New returns a new in memory BufferAt with max size N.
// It's backed by a bytes.Buffer.
func New(n int64) BufferAt {
return &memory{
N: n,
Buffer: bytes.NewBuffer(nil),
}
}
func (buf *memory) Cap() int64 {
return buf.N
}
func (buf *memory) Len() int64 {
return int64(buf.Buffer.Len())
}
func (buf *memory) Write(p []byte) (n int, err error) {
return limio.LimitWriter(buf.Buffer, Gap(buf)).Write(p)
}
func (buf *memory) WriteAt(p []byte, off int64) (n int, err error) {
if off > buf.Len() {
return 0, io.ErrShortWrite
} else if len64(p)+off <= buf.Len() {
d := buf.Bytes()[off:]
return copy(d, p), nil
} else {
d := buf.Bytes()[off:]
n = copy(d, p)
m, err := buf.Write(p[n:])
return n + m, err
}
}
func (buf *memory) ReadAt(p []byte, off int64) (n int, err error) {
return bytes.NewReader(buf.Bytes()).ReadAt(p, off)
}
func (buf *memory) Read(p []byte) (n int, err error) {
return io.LimitReader(buf.Buffer, buf.Len()).Read(p)
}
func (buf *memory) ReadFrom(r io.Reader) (n int64, err error) {
return buf.Buffer.ReadFrom(io.LimitReader(r, Gap(buf)))
}
func init() {
gob.Register(&memory{})
}
func (buf *memory) MarshalBinary() ([]byte, error) {
var b bytes.Buffer
fmt.Fprintln(&b, buf.N)
b.Write(buf.Bytes())
return b.Bytes(), nil
}
func (buf *memory) UnmarshalBinary(bindata []byte) error {
data := make([]byte, len(bindata))
copy(data, bindata)
b := bytes.NewBuffer(data)
_, err := fmt.Fscanln(b, &buf.N)
buf.Buffer = bytes.NewBuffer(b.Bytes())
return err
}

vendor/github.com/djherbis/buffer/multi.go generated vendored Normal file

@@ -0,0 +1,185 @@
package buffer
import (
"bytes"
"encoding/gob"
"io"
"math"
)
type chain struct {
Buf BufferAt
Next BufferAt
}
type nopBufferAt struct {
Buffer
}
func (buf *nopBufferAt) ReadAt(p []byte, off int64) (int, error) {
panic("ReadAt not implemented")
}
func (buf *nopBufferAt) WriteAt(p []byte, off int64) (int, error) {
panic("WriteAt not implemented")
}
// toBufferAt converts a Buffer to a BufferAt with nop ReadAt and WriteAt funcs
func toBufferAt(buf Buffer) BufferAt {
return &nopBufferAt{Buffer: buf}
}
// NewMultiAt returns a BufferAt which is the logical concatenation of the passed BufferAts.
// The data in the buffers is shifted such that there is no non-empty buffer following
// a non-full buffer, this process is also run after every Read.
// If no buffers are passed, the returned Buffer is nil.
func NewMultiAt(buffers ...BufferAt) BufferAt {
if len(buffers) == 0 {
return nil
} else if len(buffers) == 1 {
return buffers[0]
}
buf := &chain{
Buf: buffers[0],
Next: NewMultiAt(buffers[1:]...),
}
buf.Defrag()
return buf
}
// NewMulti returns a Buffer which is the logical concatenation of the passed buffers.
// The data in the buffers is shifted such that there is no non-empty buffer following
// a non-full buffer, this process is also run after every Read.
// If no buffers are passed, the returned Buffer is nil.
func NewMulti(buffers ...Buffer) Buffer {
bufAt := make([]BufferAt, len(buffers))
for i, buf := range buffers {
bufAt[i] = toBufferAt(buf)
}
return NewMultiAt(bufAt...)
}
func (buf *chain) Reset() {
buf.Next.Reset()
buf.Buf.Reset()
}
func (buf *chain) Cap() (n int64) {
Next := buf.Next.Cap()
if buf.Buf.Cap() > math.MaxInt64-Next {
return math.MaxInt64
}
return buf.Buf.Cap() + Next
}
func (buf *chain) Len() (n int64) {
Next := buf.Next.Len()
if buf.Buf.Len() > math.MaxInt64-Next {
return math.MaxInt64
}
return buf.Buf.Len() + Next
}
func (buf *chain) Defrag() {
for !Full(buf.Buf) && !Empty(buf.Next) {
r := io.LimitReader(buf.Next, Gap(buf.Buf))
if _, err := io.Copy(buf.Buf, r); err != nil && err != io.EOF {
return
}
}
}
func (buf *chain) Read(p []byte) (n int, err error) {
n, err = buf.Buf.Read(p)
if len(p[n:]) > 0 && (err == nil || err == io.EOF) {
m, err := buf.Next.Read(p[n:])
n += m
if err != nil {
return n, err
}
}
buf.Defrag()
return n, err
}
func (buf *chain) ReadAt(p []byte, off int64) (n int, err error) {
if buf.Buf.Len() < off {
return buf.Next.ReadAt(p, off-buf.Buf.Len())
}
n, err = buf.Buf.ReadAt(p, off)
if len(p[n:]) > 0 && (err == nil || err == io.EOF) {
var m int
m, err = buf.Next.ReadAt(p[n:], 0)
n += m
}
return n, err
}
func (buf *chain) Write(p []byte) (n int, err error) {
if n, err = buf.Buf.Write(p); err == io.ErrShortWrite {
err = nil
}
p = p[n:]
if len(p) > 0 && err == nil {
m, err := buf.Next.Write(p)
n += m
if err != nil {
return n, err
}
}
return n, err
}
func (buf *chain) WriteAt(p []byte, off int64) (n int, err error) {
switch {
case buf.Buf.Cap() <= off: // past the end
return buf.Next.WriteAt(p, off-buf.Buf.Cap())
case buf.Buf.Cap() >= off+int64(len(p)): // fits in
return buf.Buf.WriteAt(p, off)
default: // partial fit
n, err = buf.Buf.WriteAt(p, off)
if len(p[n:]) > 0 && (err == nil || err == io.ErrShortWrite) {
var m int
m, err = buf.Next.WriteAt(p[n:], 0)
n += m
}
return n, err
}
}
func init() {
gob.Register(&chain{})
gob.Register(&nopBufferAt{})
}
func (buf *chain) MarshalBinary() ([]byte, error) {
b := bytes.NewBuffer(nil)
enc := gob.NewEncoder(b)
if err := enc.Encode(&buf.Buf); err != nil {
return nil, err
}
if err := enc.Encode(&buf.Next); err != nil {
return nil, err
}
return b.Bytes(), nil
}
func (buf *chain) UnmarshalBinary(data []byte) error {
b := bytes.NewBuffer(data)
dec := gob.NewDecoder(b)
if err := dec.Decode(&buf.Buf); err != nil {
return err
}
if err := dec.Decode(&buf.Next); err != nil {
return err
}
return nil
}

vendor/github.com/djherbis/buffer/partition.go generated vendored Normal file

@@ -0,0 +1,101 @@
package buffer
import (
"encoding/gob"
"io"
"math"
)
type partition struct {
List
Pool
}
// NewPartition returns a Buffer which uses a Pool to extend or shrink its size as needed.
// It automatically allocates new buffers with pool.Get() to extend its length, and
// pool.Put() to release unused buffers as it shrinks.
func NewPartition(pool Pool, buffers ...Buffer) Buffer {
return &partition{
Pool: pool,
List: buffers,
}
}
func (buf *partition) Cap() int64 {
return math.MaxInt64
}
func (buf *partition) Read(p []byte) (n int, err error) {
for len(p) > 0 {
if len(buf.List) == 0 {
return n, io.EOF
}
buffer := buf.List[0]
if Empty(buffer) {
buf.Pool.Put(buf.Pop())
continue
}
m, er := buffer.Read(p)
n += m
p = p[m:]
if er != nil && er != io.EOF {
return n, er
}
}
return n, nil
}
func (buf *partition) grow() error {
next, err := buf.Pool.Get()
if err != nil {
return err
}
buf.Push(next)
return nil
}
func (buf *partition) Write(p []byte) (n int, err error) {
for len(p) > 0 {
if len(buf.List) == 0 {
if err := buf.grow(); err != nil {
return n, err
}
}
buffer := buf.List[len(buf.List)-1]
if Full(buffer) {
if err := buf.grow(); err != nil {
return n, err
}
continue
}
m, er := buffer.Write(p)
n += m
p = p[m:]
if er != nil && er != io.ErrShortWrite {
return n, er
}
}
return n, nil
}
func (buf *partition) Reset() {
for len(buf.List) > 0 {
buf.Pool.Put(buf.Pop())
}
}
func init() {
gob.Register(&partition{})
}

vendor/github.com/djherbis/buffer/partition_at.go generated vendored Normal file

@@ -0,0 +1,187 @@
package buffer
import (
"encoding/gob"
"errors"
"io"
"math"
)
type partitionAt struct {
ListAt
PoolAt
}
// NewPartitionAt returns a BufferAt which uses a PoolAt to extend or shrink its size as needed.
// It automatically allocates new buffers with pool.Get() to extend its length, and
// pool.Put() to release unused buffers as it shrinks.
func NewPartitionAt(pool PoolAt, buffers ...BufferAt) BufferAt {
return &partitionAt{
PoolAt: pool,
ListAt: buffers,
}
}
func (buf *partitionAt) Cap() int64 {
return math.MaxInt64
}
func (buf *partitionAt) Read(p []byte) (n int, err error) {
for len(p) > 0 {
if len(buf.ListAt) == 0 {
return n, io.EOF
}
buffer := buf.ListAt[0]
if Empty(buffer) {
buf.PoolAt.Put(buf.Pop())
continue
}
m, er := buffer.Read(p)
n += m
p = p[m:]
if er != nil && er != io.EOF {
return n, er
}
}
return n, nil
}
func (buf *partitionAt) ReadAt(p []byte, off int64) (n int, err error) {
if off < 0 {
return 0, errors.New("buffer.PartionAt.ReadAt: negative offset")
}
for _, buffer := range buf.ListAt {
// Find the buffer where this offset is found.
if buffer.Len() <= off {
off -= buffer.Len()
continue
}
m, er := buffer.ReadAt(p, off)
n += m
p = p[m:]
if er != nil && er != io.EOF {
return n, er
}
if len(p) == 0 {
return n, er
}
// We need to read more, starting from 0 in the next buffer.
off = 0
}
if len(p) > 0 {
return n, io.EOF
}
return n, nil
}
func (buf *partitionAt) grow() error {
next, err := buf.PoolAt.Get()
if err != nil {
return err
}
buf.Push(next)
return nil
}
func (buf *partitionAt) Write(p []byte) (n int, err error) {
for len(p) > 0 {
if len(buf.ListAt) == 0 {
if err := buf.grow(); err != nil {
return n, err
}
}
buffer := buf.ListAt[len(buf.ListAt)-1]
if Full(buffer) {
if err := buf.grow(); err != nil {
return n, err
}
continue
}
m, er := buffer.Write(p)
n += m
p = p[m:]
if er != nil && er != io.ErrShortWrite {
return n, er
}
}
return n, nil
}
func (buf *partitionAt) WriteAt(p []byte, off int64) (n int, err error) {
if off < 0 {
return 0, errors.New("buffer.PartionAt.WriteAt: negative offset")
}
if off == buf.Len() { // writing at the end special case
if err := buf.grow(); err != nil {
return 0, err
}
}
fitCheck := BufferAt.Len
for i := 0; i < len(buf.ListAt); i++ {
buffer := buf.ListAt[i]
// Find the buffer where this offset is found.
if fitCheck(buffer) < off {
off -= fitCheck(buffer)
continue
}
if i+1 == len(buf.ListAt) {
fitCheck = BufferAt.Cap
}
endOff := off + int64(len(p))
if fitCheck(buffer) >= endOff {
// Everything should fit.
return buffer.WriteAt(p, off)
}
// Assume it won't all fit, only write what should fit.
canFit := int(fitCheck(buffer) - off)
if len(p[:canFit]) > 0 {
var m int
m, err = buffer.WriteAt(p[:canFit], off)
n += m
p = p[m:]
}
off = 0 // All writes are at offset 0 of following buffers now.
if err != nil || len(p) == 0 {
return n, err
}
if i+1 == len(buf.ListAt) {
if err := buf.grow(); err != nil {
return 0, err
}
fitCheck = BufferAt.Cap
}
}
if len(p) > 0 {
err = io.ErrShortWrite
}
return n, err
}
func (buf *partitionAt) Reset() {
for len(buf.ListAt) > 0 {
buf.PoolAt.Put(buf.Pop())
}
}
func init() {
gob.Register(&partitionAt{})
}

vendor/github.com/djherbis/buffer/pool.go generated vendored Normal file

@@ -0,0 +1,111 @@
package buffer
import (
"bytes"
"encoding/binary"
"encoding/gob"
"io/ioutil"
"os"
"sync"
)
// Pool provides a way to Allocate and Release Buffer objects
// Pools must be concurrent-safe for calls to Get() and Put().
type Pool interface {
Get() (Buffer, error) // Allocate a Buffer
Put(buf Buffer) error // Release or Reuse a Buffer
}
type pool struct {
pool sync.Pool
}
// NewPool returns a Pool(), it's backed by a sync.Pool so it's safe for concurrent use.
// Get() and Put() errors will always be nil.
// It will not work with gob.
func NewPool(New func() Buffer) Pool {
return &pool{
pool: sync.Pool{
New: func() interface{} {
return New()
},
},
}
}
func (p *pool) Get() (Buffer, error) {
return p.pool.Get().(Buffer), nil
}
func (p *pool) Put(buf Buffer) error {
buf.Reset()
p.pool.Put(buf)
return nil
}
type memPool struct {
N int64
Pool
}
// NewMemPool returns a Pool, Get() returns an in memory buffer of max size N.
// Put() returns the buffer to the pool after resetting it.
// Get() and Put() errors will always be nil.
func NewMemPool(N int64) Pool {
return &memPool{
N: N,
Pool: NewPool(func() Buffer {
return New(N)
}),
}
}
func (m *memPool) MarshalBinary() ([]byte, error) {
buf := bytes.NewBuffer(nil)
err := binary.Write(buf, binary.LittleEndian, m.N)
return buf.Bytes(), err
}
func (m *memPool) UnmarshalBinary(data []byte) error {
buf := bytes.NewReader(data)
err := binary.Read(buf, binary.LittleEndian, &m.N)
m.Pool = NewPool(func() Buffer {
return New(m.N)
})
return err
}
type filePool struct {
N int64
Directory string
}
// NewFilePool returns a Pool, Get() returns a file-based buffer of max size N.
// Put() closes and deletes the underlying file for the buffer.
// Get() may return an error if it fails to create a file for the buffer.
// Put() may return an error if it fails to delete the file.
func NewFilePool(N int64, dir string) Pool {
return &filePool{N: N, Directory: dir}
}
func (p *filePool) Get() (Buffer, error) {
file, err := ioutil.TempFile(p.Directory, "buffer")
if err != nil {
return nil, err
}
return NewFile(p.N, file), nil
}
func (p *filePool) Put(buf Buffer) (err error) {
buf.Reset()
if fileBuf, ok := buf.(*fileBuffer); ok {
fileBuf.file.Close()
err = os.Remove(fileBuf.file.Name())
}
return err
}
func init() {
gob.Register(&memPool{})
gob.Register(&filePool{})
}
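The pool round trip in practice, a sketch using only the constructors defined in this file (sizes arbitrary):

```go
package main

import (
	"fmt"

	"github.com/djherbis/buffer"
)

func main() {
	// Get hands out a 32KB in-memory buffer; Put resets it and returns it
	// to the underlying sync.Pool. Both errors are always nil for this pool.
	pool := buffer.NewMemPool(32 * 1024)

	buf, _ := pool.Get()
	buf.Write([]byte("transient data"))
	fmt.Println(buf.Len()) // 14

	pool.Put(buf) // Reset() runs inside Put, so the buffer comes back empty
}
```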

vendor/github.com/djherbis/buffer/pool_at.go generated vendored Normal file
@@ -0,0 +1,111 @@
package buffer
import (
"bytes"
"encoding/binary"
"encoding/gob"
"io/ioutil"
"os"
"sync"
)
// PoolAt provides a way to Allocate and Release BufferAt objects.
// PoolAt implementations must be concurrent-safe for calls to Get() and Put().
type PoolAt interface {
Get() (BufferAt, error) // Allocate a BufferAt
Put(buf BufferAt) error // Release or Reuse a BufferAt
}
type poolAt struct {
poolAt sync.Pool
}
// NewPoolAt returns a PoolAt; it's backed by a sync.Pool so it's safe for concurrent use.
// Get() and Put() errors will always be nil.
// It will not work with gob.
func NewPoolAt(New func() BufferAt) PoolAt {
return &poolAt{
poolAt: sync.Pool{
New: func() interface{} {
return New()
},
},
}
}
func (p *poolAt) Get() (BufferAt, error) {
return p.poolAt.Get().(BufferAt), nil
}
func (p *poolAt) Put(buf BufferAt) error {
buf.Reset()
p.poolAt.Put(buf)
return nil
}
type memPoolAt struct {
N int64
PoolAt
}
// NewMemPoolAt returns a PoolAt; Get() returns an in-memory buffer of max size N.
// Put() returns the buffer to the pool after resetting it.
// Get() and Put() errors will always be nil.
func NewMemPoolAt(N int64) PoolAt {
return &memPoolAt{
N: N,
PoolAt: NewPoolAt(func() BufferAt {
return New(N)
}),
}
}
func (m *memPoolAt) MarshalBinary() ([]byte, error) {
buf := bytes.NewBuffer(nil)
err := binary.Write(buf, binary.LittleEndian, m.N)
return buf.Bytes(), err
}
func (m *memPoolAt) UnmarshalBinary(data []byte) error {
buf := bytes.NewReader(data)
err := binary.Read(buf, binary.LittleEndian, &m.N)
m.PoolAt = NewPoolAt(func() BufferAt {
return New(m.N)
})
return err
}
type filePoolAt struct {
N int64
Directory string
}
// NewFilePoolAt returns a PoolAt; Get() returns a file-based buffer of max size N.
// Put() closes and deletes the underlying file for the buffer.
// Get() may return an error if it fails to create a file for the buffer.
// Put() may return an error if it fails to delete the file.
func NewFilePoolAt(N int64, dir string) PoolAt {
return &filePoolAt{N: N, Directory: dir}
}
func (p *filePoolAt) Get() (BufferAt, error) {
file, err := ioutil.TempFile(p.Directory, "buffer")
if err != nil {
return nil, err
}
return NewFile(p.N, file), nil
}
func (p *filePoolAt) Put(buf BufferAt) (err error) {
buf.Reset()
if fileBuf, ok := buf.(*fileBuffer); ok {
fileBuf.file.Close()
err = os.Remove(fileBuf.file.Name())
}
return err
}
func init() {
gob.Register(&memPoolAt{})
gob.Register(&filePoolAt{})
}
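The file-backed variant follows the same shape; a sketch, assuming temp-file creation and deletion are the only failure modes worth handling here:

```go
package main

import (
	"log"
	"os"

	"github.com/djherbis/buffer"
)

func main() {
	// Each Get creates a temp file and wraps it in a 1MB file-backed
	// BufferAt; Put closes and deletes that file.
	pool := buffer.NewFilePoolAt(1<<20, os.TempDir())

	buf, err := pool.Get() // fails if the temp file cannot be created
	if err != nil {
		log.Fatal(err)
	}
	buf.WriteAt([]byte("spilled to disk"), 0)

	if err := pool.Put(buf); err != nil { // fails if the delete fails
		log.Fatal(err)
	}
}
```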

vendor/github.com/djherbis/buffer/ring.go generated vendored Normal file
@@ -0,0 +1,58 @@
package buffer
import (
"io"
"math"
"github.com/djherbis/buffer/wrapio"
)
type ring struct {
BufferAt
L int64
*wrapio.WrapReader
*wrapio.WrapWriter
}
// NewRing returns a Ring Buffer from a BufferAt.
// It overwrites old data in the Buffer when needed (when it's full).
func NewRing(buffer BufferAt) Buffer {
return &ring{
BufferAt: buffer,
WrapReader: wrapio.NewWrapReader(buffer, 0, buffer.Cap()),
WrapWriter: wrapio.NewWrapWriter(buffer, 0, buffer.Cap()),
}
}
func (buf *ring) Len() int64 {
return buf.L
}
func (buf *ring) Cap() int64 {
return math.MaxInt64
}
func (buf *ring) Read(p []byte) (n int, err error) {
if buf.L == buf.BufferAt.Cap() {
buf.WrapReader.Seek(buf.WrapWriter.Offset(), 0)
}
n, err = io.LimitReader(buf.WrapReader, buf.L).Read(p)
buf.L -= int64(n)
return n, err
}
func (buf *ring) Write(p []byte) (n int, err error) {
n, err = buf.WrapWriter.Write(p)
buf.L += int64(n)
if buf.L > buf.BufferAt.Cap() {
buf.L = buf.BufferAt.Cap()
}
return n, err
}
func (buf *ring) Reset() {
buf.BufferAt.Reset()
buf.L = 0
buf.WrapReader = wrapio.NewWrapReader(buf.BufferAt, 0, buf.BufferAt.Cap())
buf.WrapWriter = wrapio.NewWrapWriter(buf.BufferAt, 0, buf.BufferAt.Cap())
}
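A sketch of the overwrite behaviour, assuming buffer.New's in-memory buffer satisfies BufferAt (as the pool code above relies on):

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/djherbis/buffer"
)

func main() {
	// An 8-byte ring: once the backing buffer fills, new writes overwrite
	// the oldest data and reads resume from the writer's current offset.
	ring := buffer.NewRing(buffer.New(8))

	ring.Write([]byte("0123456789abcdef")) // 16 bytes into an 8-byte ring

	data, _ := ioutil.ReadAll(ring)
	fmt.Printf("%s\n", data) // only the newest 8 bytes survive: 89abcdef
}
```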

vendor/github.com/djherbis/buffer/spill.go generated vendored Normal file
@@ -0,0 +1,41 @@
package buffer
import (
"encoding/gob"
"io"
"io/ioutil"
"math"
)
type spill struct {
Buffer
Spiller io.Writer
}
// NewSpill returns a Buffer which writes data to w when there's an error
// writing to buf, such as when buf is full or the disk is full.
func NewSpill(buf Buffer, w io.Writer) Buffer {
if w == nil {
w = ioutil.Discard
}
return &spill{
Buffer: buf,
Spiller: w,
}
}
func (buf *spill) Cap() int64 {
return math.MaxInt64
}
func (buf *spill) Write(p []byte) (n int, err error) {
if n, err = buf.Buffer.Write(p); err != nil {
m, err := buf.Spiller.Write(p[n:])
return m + n, err
}
return len(p), nil
}
func init() {
gob.Register(&spill{})
}
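A sketch of the spill path. It assumes the in-memory buffer reports an error (a short write) once it is full, which is what hands the overflow to the Spiller:

```go
package main

import (
	"fmt"
	"os"

	"github.com/djherbis/buffer"
)

func main() {
	// The first 4 bytes land in the buffer; the rest go to stderr instead
	// of surfacing the buffer-full error to the caller.
	spill := buffer.NewSpill(buffer.New(4), os.Stderr)

	n, err := spill.Write([]byte("abcdefgh"))
	fmt.Println(n, err) // 8 <nil>: 4 bytes buffered, 4 bytes spilled
}
```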

vendor/github.com/djherbis/buffer/swap.go generated vendored Normal file
@@ -0,0 +1,99 @@
package buffer
import (
"encoding/gob"
"io"
)
type swap struct {
A BufferAt
B BufferAt
}
// NewSwap creates a Buffer which writes to a until you write past a.Cap()
// then it io.Copy's from a to b and writes to b.
// Once the Buffer is empty again, it starts over writing to a.
// Note that if b.Cap() <= a.Cap() it will panic; b is expected
// to be larger in order to accommodate writes past a.Cap().
func NewSwap(a, b Buffer) Buffer {
return NewSwapAt(toBufferAt(a), toBufferAt(b))
}
// NewSwapAt creates a BufferAt which writes to a until you write past a.Cap()
// then it io.Copy's from a to b and writes to b.
// Once the Buffer is empty again, it starts over writing to a.
// Note that if b.Cap() <= a.Cap() it will panic; b is expected
// to be larger in order to accommodate writes past a.Cap().
func NewSwapAt(a, b BufferAt) BufferAt {
if b.Cap() <= a.Cap() {
panic("Buffer b must be larger than a.")
}
return &swap{A: a, B: b}
}
func (buf *swap) Len() int64 {
return buf.A.Len() + buf.B.Len()
}
func (buf *swap) Cap() int64 {
return buf.B.Cap()
}
func (buf *swap) Read(p []byte) (n int, err error) {
if buf.A.Len() > 0 {
return buf.A.Read(p)
}
return buf.B.Read(p)
}
func (buf *swap) ReadAt(p []byte, off int64) (n int, err error) {
if buf.A.Len() > 0 {
return buf.A.ReadAt(p, off)
}
return buf.B.ReadAt(p, off)
}
func (buf *swap) Write(p []byte) (n int, err error) {
switch {
case buf.B.Len() > 0:
n, err = buf.B.Write(p)
case buf.A.Len()+int64(len(p)) > buf.A.Cap():
_, err = io.Copy(buf.B, buf.A)
if err == nil {
n, err = buf.B.Write(p)
}
default:
n, err = buf.A.Write(p)
}
return n, err
}
func (buf *swap) WriteAt(p []byte, off int64) (n int, err error) {
switch {
case buf.B.Len() > 0:
n, err = buf.B.WriteAt(p, off)
case off+int64(len(p)) > buf.A.Cap():
_, err = io.Copy(buf.B, buf.A)
if err == nil {
n, err = buf.B.WriteAt(p, off)
}
default:
n, err = buf.A.WriteAt(p, off)
}
return n, err
}
func (buf *swap) Reset() {
buf.A.Reset()
buf.B.Reset()
}
func init() {
gob.Register(&swap{})
}
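A sketch of the swap-over with two in-memory buffers (b strictly larger than a, since NewSwapAt panics otherwise):

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/djherbis/buffer"
)

func main() {
	// Writes go to a until one would exceed a.Cap(); then a is drained
	// into b via io.Copy and writing continues in b.
	swap := buffer.NewSwap(buffer.New(4), buffer.New(64))

	swap.Write([]byte("ab"))   // fits in a
	swap.Write([]byte("cdef")) // 2+4 > 4: a's contents move to b, then "cdef" follows

	data, _ := ioutil.ReadAll(swap)
	fmt.Printf("%s\n", data) // abcdef
}
```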

vendor/github.com/djherbis/buffer/wrapio/limitwrap.go generated vendored Normal file
@@ -0,0 +1,94 @@
package wrapio
import (
"encoding/gob"
"io"
"github.com/djherbis/buffer/limio"
)
// ReadWriterAt implements io.ReaderAt and io.WriterAt
type ReadWriterAt interface {
io.ReaderAt
io.WriterAt
}
// Wrapper implements an io.ReadWriter and ReadWriterAt such that
// when reading/writing goes past N bytes, it "wraps" back to the beginning.
type Wrapper struct {
// N is the offset at which to "wrap" back to the start
N int64
// L is the length of the data written
L int64
// O is our offset in the data
O int64
rwa ReadWriterAt
}
// NewWrapper creates a Wrapper based on ReadWriterAt rwa.
// L is the current length, O is the current offset, and N is the offset at which we "wrap".
func NewWrapper(rwa ReadWriterAt, L, O, N int64) *Wrapper {
return &Wrapper{
L: L,
O: O,
N: N,
rwa: rwa,
}
}
// Len returns the # of bytes in the Wrapper
func (wpr *Wrapper) Len() int64 {
return wpr.L
}
// Cap returns the "wrap" offset (max # of bytes)
func (wpr *Wrapper) Cap() int64 {
return wpr.N
}
// Reset seeks to the start (0 offset), and sets the length to 0.
func (wpr *Wrapper) Reset() {
wpr.O = 0
wpr.L = 0
}
// SetReadWriterAt lets you switch the underlying Read/WriterAt
func (wpr *Wrapper) SetReadWriterAt(rwa ReadWriterAt) {
wpr.rwa = rwa
}
// Read reads from the current offset into p, wrapping at Cap()
func (wpr *Wrapper) Read(p []byte) (n int, err error) {
n, err = wpr.ReadAt(p, 0)
wpr.L -= int64(n)
wpr.O += int64(n)
wpr.O %= wpr.N
return n, err
}
// ReadAt reads from the current offset+off into p, wrapping at Cap()
func (wpr *Wrapper) ReadAt(p []byte, off int64) (n int, err error) {
wrap := NewWrapReader(wpr.rwa, wpr.O+off, wpr.N)
r := io.LimitReader(wrap, wpr.L-off)
return r.Read(p)
}
// Write writes p to the end of the Wrapper (at Len()), wrapping at Cap()
func (wpr *Wrapper) Write(p []byte) (n int, err error) {
return wpr.WriteAt(p, wpr.L)
}
// WriteAt writes p at the current offset+off, wrapping at Cap()
func (wpr *Wrapper) WriteAt(p []byte, off int64) (n int, err error) {
wrap := NewWrapWriter(wpr.rwa, wpr.O+off, wpr.N)
w := limio.LimitWriter(wrap, wpr.N-off)
n, err = w.Write(p)
if wpr.L < off+int64(n) {
wpr.L = int64(n) + off
}
return n, err
}
func init() {
gob.Register(&Wrapper{})
}
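A FIFO sketch over an *os.File, which satisfies ReadWriterAt directly; the 8-byte wrap point is illustrative:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"github.com/djherbis/buffer/wrapio"
)

func main() {
	f, err := ioutil.TempFile("", "wrap")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())

	// Empty (L=0), starting at offset 0, wrapping at 8 bytes.
	w := wrapio.NewWrapper(f, 0, 0, 8)

	w.Write([]byte("abcdef")) // L=6
	p := make([]byte, 4)
	w.Read(p) // consumes "abcd"; the read offset advances to 4

	w.Write([]byte("ghij")) // "gh" fills offsets 6-7, "ij" wraps to offsets 0-1

	q := make([]byte, 6)
	n, _ := w.Read(q)
	fmt.Printf("%s\n", q[:n]) // efghij
}
```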

vendor/github.com/djherbis/buffer/wrapio/wrap.go generated vendored Normal file
@@ -0,0 +1,139 @@
package wrapio
import "io"
// DoerAt is a common interface for wrapped WriteAt or ReadAt functions
type DoerAt interface {
DoAt([]byte, int64) (int, error)
}
// DoAtFunc is implemented by ReadAt/WriteAt
type DoAtFunc func([]byte, int64) (int, error)
type wrapper struct {
off int64
wrapAt int64
doat DoAtFunc
}
func (w *wrapper) Offset() int64 {
return w.off
}
func (w *wrapper) Seek(offset int64, whence int) (int64, error) {
switch whence {
case 0:
w.off = offset
case 1:
w.off += offset
case 2:
w.off = (w.wrapAt + offset)
}
w.off %= w.wrapAt
return w.off, nil
}
func (w *wrapper) DoAt(p []byte, off int64) (n int, err error) {
return w.doat(p, off)
}
// WrapWriter wraps writes around a section of data.
type WrapWriter struct {
*wrapper
}
// NewWrapWriter creates a WrapWriter starting at offset off, and wrapping at offset wrapAt.
func NewWrapWriter(w io.WriterAt, off int64, wrapAt int64) *WrapWriter {
return &WrapWriter{
&wrapper{
doat: w.WriteAt,
off: (off % wrapAt),
wrapAt: wrapAt,
},
}
}
// Write writes p starting at the current offset, wrapping when it reaches the end.
// The current offset is shifted forward by the amount written.
func (w *WrapWriter) Write(p []byte) (n int, err error) {
n, err = Wrap(w, p, w.off, w.wrapAt)
w.off = (w.off + int64(n)) % w.wrapAt
return n, err
}
// WriteAt writes p starting at offset off, wrapping when it reaches the end.
func (w *WrapWriter) WriteAt(p []byte, off int64) (n int, err error) {
return Wrap(w, p, off, w.wrapAt)
}
// WrapReader wraps reads around a section of data.
type WrapReader struct {
*wrapper
}
// NewWrapReader creates a WrapReader starting at offset off, and wrapping at offset wrapAt.
func NewWrapReader(r io.ReaderAt, off int64, wrapAt int64) *WrapReader {
return &WrapReader{
&wrapper{
doat: r.ReadAt,
off: (off % wrapAt),
wrapAt: wrapAt,
},
}
}
// Read reads into p starting at the current offset, wrapping if it reaches the end.
// The current offset is shifted forward by the amount read.
func (r *WrapReader) Read(p []byte) (n int, err error) {
n, err = Wrap(r, p, r.off, r.wrapAt)
r.off = (r.off + int64(n)) % r.wrapAt
return n, err
}
// ReadAt reads into p starting at offset off, wrapping when it reaches the end.
func (r *WrapReader) ReadAt(p []byte, off int64) (n int, err error) {
return Wrap(r, p, off, r.wrapAt)
}
// maxConsecutiveEmptyActions determines how many consecutive empty reads/writes can occur before giving up
const maxConsecutiveEmptyActions = 100
// Wrap causes an action on an array of bytes (like read/write) to be done from an offset off,
// wrapping at offset wrapAt.
func Wrap(w DoerAt, p []byte, off int64, wrapAt int64) (n int, err error) {
var m, fails int
off %= wrapAt
for len(p) > 0 {
if off+int64(len(p)) < wrapAt {
m, err = w.DoAt(p, off)
} else {
space := wrapAt - off
m, err = w.DoAt(p[:space], off)
}
if err != nil && err != io.EOF {
return n + m, err
}
switch m {
case 0:
fails++
default:
fails = 0
}
if fails > maxConsecutiveEmptyActions {
return n + m, io.ErrNoProgress
}
n += m
p = p[m:]
off += int64(m)
off %= wrapAt
}
return n, err
}
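And the writer on its own, a minimal sketch of the wrap-around overwrite:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"github.com/djherbis/buffer/wrapio"
)

func main() {
	f, err := ioutil.TempFile("", "ring")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())

	// Wrap at 4 bytes: the 5th and 6th bytes overwrite offsets 0 and 1.
	w := wrapio.NewWrapWriter(f, 0, 4)
	w.Write([]byte("abcdef")) // writes "abcd", then "ef" over "ab"

	out := make([]byte, 4)
	f.ReadAt(out, 0)
	fmt.Printf("%s\n", out) // efcd
}
```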

vendor/github.com/djherbis/nio/v3/.travis.yml generated vendored Normal file
@@ -0,0 +1,22 @@
language: go
go:
  - tip
before_install:
  - go get -u golang.org/x/lint/golint
  - go get github.com/axw/gocov/gocov
  - go get github.com/mattn/goveralls
  - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
script:
  - '[ "${TRAVIS_PULL_REQUEST}" != "false" ] || $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN'
  - "$HOME/gopath/bin/golint ./..."
  - go vet
  - go test -bench=.* -v ./...
notifications:
  email:
    on_success: never
    on_failure: change
env:
  global:
    secure: gpKsimMN5YScLnbcoWvJPw8VL+qCpZgnC4i8mFn/lRX5Ta9FhDMROQre0Ko4bU9RX/u/IBL1fO/IyaVtVWQ0fhsDi+ovrh3LgzewwZBgz7FGiyFpagvf91Jwq5Yus15QQZ8MebrQ41H1YiWMdLOHlZdN6gNb0cswg3w4MRjbGb4=

vendor/github.com/djherbis/nio/v3/LICENSE.txt generated vendored Normal file
@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2015 Dustin H
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/djherbis/nio/v3/README.md generated vendored Normal file
@@ -0,0 +1,65 @@
nio
==========
[![GoDoc](https://godoc.org/github.com/djherbis/nio?status.svg)](https://godoc.org/github.com/djherbis/nio)
[![Release](https://img.shields.io/github/release/djherbis/nio.svg)](https://github.com/djherbis/nio/releases/latest)
[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg)](LICENSE.txt)
[![Build Status](https://travis-ci.org/djherbis/nio.svg)](https://travis-ci.org/djherbis/nio)
[![Coverage Status](https://coveralls.io/repos/djherbis/nio/badge.svg?branch=master)](https://coveralls.io/r/djherbis/nio?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/djherbis/nio)](https://goreportcard.com/report/github.com/djherbis/nio)
Usage
-----
The Buffer interface:
```go
type Buffer interface {
Len() int64
Cap() int64
io.ReadWriter
}
```
nio's Copy method concurrently copies from an io.Reader to a supplied nio.Buffer,
then from the nio.Buffer to an io.Writer. This way, blocking writes don't slow the io.Reader.
```go
import (
"github.com/djherbis/buffer"
"github.com/djherbis/nio"
)
buf := buffer.New(32*1024) // 32KB in-memory Buffer
nio.Copy(w, r, buf) // Reads and Writes concurrently, buffering using buf.
```
nio's Pipe method is a buffered version of io.Pipe.
The writer returns once its data has been written to the Buffer.
The reader returns with data read off the Buffer.
```go
import (
"gopkg.in/djherbis/buffer.v1"
"gopkg.in/djherbis/nio.v2"
)
buf := buffer.New(32*1024) // 32KB in-memory Buffer
r, w := nio.Pipe(buf)
```
Installation
------------
```sh
go get github.com/djherbis/nio/v3
```
For some pre-built buffers grab:
```sh
go get github.com/djherbis/buffer
```
Mentions
------------
[GopherCon 2017: Peter Bourgon - Evolutionary Optimization with Go](https://www.youtube.com/watch?v=ha8gdZ27wMo&start=2077&end=2140)

vendor/github.com/djherbis/nio/v3/go.mod generated vendored Normal file
@@ -0,0 +1,5 @@
module github.com/djherbis/nio/v3
go 1.16
require github.com/djherbis/buffer v1.1.0

vendor/github.com/djherbis/nio/v3/go.sum generated vendored Normal file
@@ -0,0 +1,2 @@
github.com/djherbis/buffer v1.1.0 h1:uGQ+DZDAMlfC2z3khbBtLcAHC0wyoNrX9lpOml3g3fg=
github.com/djherbis/buffer v1.1.0/go.mod h1:VwN8VdFkMY0DCALdY8o00d3IZ6Amz/UNVMWcSaJT44o=

vendor/github.com/djherbis/nio/v3/nio.go generated vendored Normal file
@@ -0,0 +1,53 @@
// Package nio provides a few buffered io primitives.
package nio
import "io"
// Buffer is used to store bytes.
type Buffer interface {
// Len returns how many bytes are buffered
Len() int64
// Cap returns how many bytes can be in the buffer at a time
Cap() int64
// ReadWriter writes are stored in the buffer, reads return the stored data
io.ReadWriter
}
// Pipe creates a buffered pipe.
// It can be used to connect code expecting an io.Reader with code expecting an io.Writer.
// Reads on one end read from the supplied Buffer. Writes write to the supplied Buffer.
// It is safe to call Read and Write in parallel with each other or with Close.
// Close will complete once pending I/O is done, and may cancel blocking Read/Writes.
// Buffered data will still be available to Read after the Writer has been closed.
// Parallel calls to Read and parallel calls to Write are also safe:
// the individual calls will be gated sequentially.
func Pipe(buf Buffer) (r *PipeReader, w *PipeWriter) {
p := newBufferedPipe(buf)
r = &PipeReader{bufpipe: p}
w = &PipeWriter{bufpipe: p}
return r, w
}
// Copy copies from src to buf, and from buf to dst in parallel until
// either EOF is reached on src or an error occurs. It returns the number of bytes
// copied to dst and the first error encountered while copying, if any.
// EOF is not considered to be an error. If src implements WriterTo, it is used to
// write to the supplied Buffer. If dst implements ReaderFrom, it is used to read from
// the supplied Buffer.
func Copy(dst io.Writer, src io.Reader, buf Buffer) (n int64, err error) {
return io.Copy(dst, NewReader(src, buf))
}
// NewReader reads from the buffer which is concurrently filled with data from the passed src.
func NewReader(src io.Reader, buf Buffer) io.ReadCloser {
r, w := Pipe(buf)
go func() {
_, err := io.Copy(w, src)
w.CloseWithError(err)
}()
return r
}
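A sketch of the concurrent copy, matching the signatures above:

```go
package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/djherbis/buffer"
	"github.com/djherbis/nio/v3"
)

func main() {
	// src fills the 32KB buffer in one goroutine while dst drains it in
	// this one, so a slow dst does not stall reads from src.
	buf := buffer.New(32 * 1024)
	n, err := nio.Copy(os.Stdout, strings.NewReader("hello, nio\n"), buf)
	fmt.Fprintln(os.Stderr, n, err) // 11 <nil>
}
```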

vendor/github.com/djherbis/nio/v3/sync.go generated vendored Normal file
@@ -0,0 +1,177 @@
package nio
import (
"io"
"sync"
)
// PipeReader is the read half of the pipe.
type PipeReader struct {
*bufpipe
}
// CloseWithError closes the reader; subsequent writes to the write half of the pipe will return the error err.
func (r *PipeReader) CloseWithError(err error) error {
if err == nil {
err = io.ErrClosedPipe
}
r.bufpipe.l.Lock()
defer r.bufpipe.l.Unlock()
if r.bufpipe.rerr == nil {
r.bufpipe.rerr = err
r.bufpipe.rwait.Signal()
r.bufpipe.wwait.Signal()
}
return nil
}
// Close closes the reader; subsequent writes to the write half of the pipe will return the error io.ErrClosedPipe.
func (r *PipeReader) Close() error {
return r.CloseWithError(nil)
}
// A PipeWriter is the write half of a pipe.
type PipeWriter struct {
*bufpipe
}
// CloseWithError closes the writer; once the buffer is empty subsequent reads from the read half of the pipe will return
// no bytes and the error err, or io.EOF if err is nil. CloseWithError always returns nil.
func (w *PipeWriter) CloseWithError(err error) error {
if err == nil {
err = io.EOF
}
w.bufpipe.l.Lock()
defer w.bufpipe.l.Unlock()
if w.bufpipe.werr == nil {
w.bufpipe.werr = err
w.bufpipe.rwait.Signal()
w.bufpipe.wwait.Signal()
}
return nil
}
// Close closes the writer; once the buffer has been drained, subsequent reads from the read half of the pipe
// will return no bytes and io.EOF. Close always returns nil.
func (w *PipeWriter) Close() error {
return w.CloseWithError(nil)
}
type bufpipe struct {
rl sync.Mutex
wl sync.Mutex
l sync.Mutex
rwait sync.Cond
wwait sync.Cond
b Buffer
rerr error // if reader closed, error to give writes
werr error // if writer closed, error to give reads
}
func newBufferedPipe(buf Buffer) *bufpipe {
s := &bufpipe{
b: buf,
}
s.rwait.L = &s.l
s.wwait.L = &s.l
return s
}
func empty(buf Buffer) bool {
return buf.Len() == 0
}
func gap(buf Buffer) int64 {
return buf.Cap() - buf.Len()
}
func (r *PipeReader) Read(p []byte) (n int, err error) {
r.rl.Lock()
defer r.rl.Unlock()
r.l.Lock()
defer r.wwait.Signal()
defer r.l.Unlock()
for empty(r.b) {
if r.rerr != nil {
return 0, io.ErrClosedPipe
}
if r.werr != nil {
return 0, r.werr
}
r.wwait.Signal()
r.rwait.Wait()
}
n, err = r.b.Read(p)
if err == io.EOF {
err = nil
}
return n, err
}
func (w *PipeWriter) Write(p []byte) (int, error) {
var m int
var n, space int64
var err error
sliceLen := int64(len(p))
w.wl.Lock()
defer w.wl.Unlock()
w.l.Lock()
defer w.rwait.Signal()
defer w.l.Unlock()
if w.werr != nil {
return 0, io.ErrClosedPipe
}
// while there is data to write
for writeLen := sliceLen; writeLen > 0 && err == nil; writeLen = sliceLen - n {
// wait for some buffer space to become available (while no errs)
for space = gap(w.b); space == 0 && w.rerr == nil && w.werr == nil; space = gap(w.b) {
w.rwait.Signal()
w.wwait.Wait()
}
if w.rerr != nil {
err = w.rerr
break
}
if w.werr != nil {
err = io.ErrClosedPipe
break
}
// space > 0, and locked
var nn int64
if space < writeLen {
// => writeLen - space > 0
// => (sliceLen - n) - space > 0
// => sliceLen > n + space
// nn is safe to use for p[:nn]
nn = n + space
} else {
nn = sliceLen
}
m, err = w.b.Write(p[n:nn])
n += int64(m)
// one of the following cases has occurred:
// 1. done writing -> writeLen == 0
// 2. ran out of buffer space -> gap(w.b) == 0
// 3. an error occurred err != nil
// all of these cases are handled at the top of this loop
}
return int(n), err
}
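Finally, a sketch of the pipe semantics these halves implement: the writer blocks only while the buffer is full, and Close lets the reader drain whatever remains:

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/djherbis/buffer"
	"github.com/djherbis/nio/v3"
)

func main() {
	r, w := nio.Pipe(buffer.New(8)) // 8-byte buffer between the halves

	go func() {
		// 14 bytes through an 8-byte buffer: Write parks on wwait whenever
		// gap() == 0 and resumes as the reader signals progress.
		w.Write([]byte("buffered pipes"))
		w.Close() // reader sees io.EOF once the buffer drains
	}()

	data, _ := ioutil.ReadAll(r)
	fmt.Printf("%s\n", data) // buffered pipes
}
```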

vendor/modules.txt vendored
@@ -231,6 +231,14 @@ github.com/denisenkom/go-mssqldb/internal/querytext
github.com/dgrijalva/jwt-go
# github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f
github.com/dgryski/go-rendezvous
# github.com/djherbis/buffer v1.2.0
## explicit
github.com/djherbis/buffer
github.com/djherbis/buffer/limio
github.com/djherbis/buffer/wrapio
# github.com/djherbis/nio/v3 v3.0.1
## explicit
github.com/djherbis/nio/v3
# github.com/dlclark/regexp2 v1.4.0
github.com/dlclark/regexp2
github.com/dlclark/regexp2/syntax