
[chore]: Bump github.com/minio/minio-go/v7 from 7.0.67 to 7.0.69 (#2748)

This commit is contained in:
dependabot[bot] 2024-03-11 10:51:13 +00:00 committed by GitHub
parent 5a56f4f8fb
commit 8e88ee8d9c
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
28 changed files with 487 additions and 370 deletions

go.mod (4 changes)
View File

@@ -40,7 +40,7 @@ require (
 	github.com/jackc/pgx/v5 v5.5.5
 	github.com/microcosm-cc/bluemonday v1.0.26
 	github.com/miekg/dns v1.1.58
-	github.com/minio/minio-go/v7 v7.0.67
+	github.com/minio/minio-go/v7 v7.0.69
 	github.com/mitchellh/mapstructure v1.5.0
 	github.com/oklog/ulid v1.3.1
 	github.com/prometheus/client_golang v1.18.0
@@ -155,7 +155,7 @@ require (
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
-	github.com/klauspost/compress v1.17.4 // indirect
+	github.com/klauspost/compress v1.17.6 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.7 // indirect
 	github.com/kr/pretty v0.3.1 // indirect
 	github.com/kr/text v0.2.0 // indirect

go.sum (8 changes)
View File

@@ -449,8 +449,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
 github.com/klauspost/compress v1.10.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
-github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
+github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI=
+github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
 github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
@@ -495,8 +495,8 @@ github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
 github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
 github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.67 h1:BeBvZWAS+kRJm1vGTMJYVjKUNoo0FoEt/wUWdUtfmh8=
-github.com/minio/minio-go/v7 v7.0.67/go.mod h1:+UXocnUeZ3wHvVh5s95gcrA4YjMIbccT6ubB+1m054A=
+github.com/minio/minio-go/v7 v7.0.69 h1:l8AnsQFyY1xiwa/DaQskY4NXSLA2yrGsW5iD9nRPVS0=
+github.com/minio/minio-go/v7 v7.0.69/go.mod h1:XAvOPJQ5Xlzk5o3o/ArO2NMbhSGkimC+bpW/ngRKDmQ=
 github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
 github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
 github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=

View File

@@ -212,7 +212,7 @@ func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error {
 // Should only be used after a start/reset.
 func (d *compressor) fillWindow(b []byte) {
 	// Do not fill window if we are in store-only or huffman mode.
-	if d.level <= 0 {
+	if d.level <= 0 && d.level > -MinCustomWindowSize {
 		return
 	}
 	if d.fast != nil {

View File

@@ -0,0 +1,13 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !race
+
+package race
+
+func ReadSlice[T any](s []T) {
+}
+
+func WriteSlice[T any](s []T) {
+}

View File

@@ -0,0 +1,26 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build race
+
+package race
+
+import (
+	"runtime"
+	"unsafe"
+)
+
+func ReadSlice[T any](s []T) {
+	if len(s) == 0 {
+		return
+	}
+	runtime.RaceReadRange(unsafe.Pointer(&s[0]), len(s)*int(unsafe.Sizeof(s[0])))
+}
+
+func WriteSlice[T any](s []T) {
+	if len(s) == 0 {
+		return
+	}
+	runtime.RaceWriteRange(unsafe.Pointer(&s[0]), len(s)*int(unsafe.Sizeof(s[0])))
+}
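
For orientation, these helpers compile to no-ops without the race detector and expand to runtime.RaceReadRange/RaceWriteRange under `-race`, so slice accesses made by assembly routines (which the detector cannot instrument) are still reported. A minimal sketch of the call pattern, mirroring how the s2 encode/decode paths use it; `copyAsm` is a hypothetical stand-in, and because the package is under `internal/`, only code inside the github.com/klauspost/compress module can actually import it:

```go
package example

import "github.com/klauspost/compress/internal/race"

// copyAsm stands in for an assembly routine that is invisible to the race detector.
func copyAsm(dst, src []byte) int { return copy(dst, src) }

// copyChecked annotates the slice accesses before handing them to assembly.
func copyChecked(dst, src []byte) int {
	race.ReadSlice(src)  // the routine will read all of src
	race.WriteSlice(dst) // the routine will write all of dst
	return copyAsm(dst, src)
}
```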

View File

@@ -10,6 +10,8 @@ import (
 	"errors"
 	"fmt"
 	"strconv"
+
+	"github.com/klauspost/compress/internal/race"
 )
 
 var (
@@ -63,6 +65,10 @@ func Decode(dst, src []byte) ([]byte, error) {
 	} else {
 		dst = make([]byte, dLen)
 	}
+
+	race.WriteSlice(dst)
+	race.ReadSlice(src[s:])
+
 	if s2Decode(dst, src[s:]) != 0 {
 		return nil, ErrCorrupt
 	}

View File

@@ -3,6 +3,8 @@
 
 package s2
 
+import "github.com/klauspost/compress/internal/race"
+
 const hasAmd64Asm = true
 
 // encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
@@ -14,6 +16,9 @@ const hasAmd64Asm = true
 //	len(dst) >= MaxEncodedLen(len(src)) &&
 //	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
 func encodeBlock(dst, src []byte) (d int) {
+	race.ReadSlice(src)
+	race.WriteSlice(dst)
+
 	const (
 		// Use 12 bit table when less than...
 		limit12B = 16 << 10
@@ -50,6 +55,9 @@ func encodeBlock(dst, src []byte) (d int) {
 //	len(dst) >= MaxEncodedLen(len(src)) &&
 //	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
 func encodeBlockBetter(dst, src []byte) (d int) {
+	race.ReadSlice(src)
+	race.WriteSlice(dst)
+
 	const (
 		// Use 12 bit table when less than...
 		limit12B = 16 << 10
@@ -86,6 +94,9 @@ func encodeBlockBetter(dst, src []byte) (d int) {
 //	len(dst) >= MaxEncodedLen(len(src)) &&
 //	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
 func encodeBlockSnappy(dst, src []byte) (d int) {
+	race.ReadSlice(src)
+	race.WriteSlice(dst)
+
 	const (
 		// Use 12 bit table when less than...
 		limit12B = 16 << 10
@@ -121,6 +132,9 @@ func encodeBlockSnappy(dst, src []byte) (d int) {
 //	len(dst) >= MaxEncodedLen(len(src)) &&
 //	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
 func encodeBlockBetterSnappy(dst, src []byte) (d int) {
+	race.ReadSlice(src)
+	race.WriteSlice(dst)
+
 	const (
 		// Use 12 bit table when less than...
 		limit12B = 16 << 10

View File

@@ -104,12 +104,14 @@ func ReaderIgnoreStreamIdentifier() ReaderOption {
 // For each chunk with the ID, the callback is called with the content.
 // Any returned non-nil error will abort decompression.
 // Only one callback per ID is supported, latest sent will be used.
+// You can peek the stream, triggering the callback, by doing a Read with a 0
+// byte buffer.
 func ReaderSkippableCB(id uint8, fn func(r io.Reader) error) ReaderOption {
 	return func(r *Reader) error {
 		if id < 0x80 || id > 0xfd {
 			return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfd (inclusive)")
 		}
-		r.skippableCB[id] = fn
+		r.skippableCB[id-0x80] = fn
 		return nil
 	}
 }
@@ -128,7 +130,7 @@ type Reader struct {
 	err         error
 	decoded     []byte
 	buf         []byte
-	skippableCB [0x80]func(r io.Reader) error
+	skippableCB [0xff - 0x80]func(r io.Reader) error
 	blockStart  int64 // Uncompressed offset at start of current.
 	index       *Index
@@ -201,7 +203,7 @@ func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
 // The supplied slice does not need to be the size of the read.
 func (r *Reader) skippable(tmp []byte, n int, allowEOF bool, id uint8) (ok bool) {
 	if id < 0x80 {
-		r.err = fmt.Errorf("interbal error: skippable id < 0x80")
+		r.err = fmt.Errorf("internal error: skippable id < 0x80")
 		return false
 	}
 	if fn := r.skippableCB[id-0x80]; fn != nil {
@@ -450,6 +452,12 @@ func (r *Reader) DecodeConcurrent(w io.Writer, concurrent int) (written int64, e
 		for toWrite := range queue {
 			entry := <-toWrite
 			reUse <- toWrite
+			if hasErr() || entry == nil {
+				if entry != nil {
+					writtenBlocks <- entry
+				}
+				continue
+			}
 			if hasErr() {
 				writtenBlocks <- entry
 				continue
@@ -469,13 +477,13 @@ func (r *Reader) DecodeConcurrent(w io.Writer, concurrent int) (written int64, e
 		}
 	}()
 
-	// Reader
 	defer func() {
-		close(queue)
 		if r.err != nil {
-			err = r.err
 			setErr(r.err)
+		} else if err != nil {
+			setErr(err)
 		}
+		close(queue)
 		wg.Wait()
 		if err == nil {
 			err = aErr
@@ -483,6 +491,7 @@ func (r *Reader) DecodeConcurrent(w io.Writer, concurrent int) (written int64, e
 		written = aWritten
 	}()
 
+	// Reader
 	for !hasErr() {
 		if !r.readFull(r.buf[:4], true) {
 			if r.err == io.EOF {
@@ -551,11 +560,13 @@ func (r *Reader) DecodeConcurrent(w io.Writer, concurrent int) (written int64, e
 			if err != nil {
 				writtenBlocks <- decoded
 				setErr(err)
+				entry <- nil
 				return
 			}
 			if !r.ignoreCRC && crc(decoded) != checksum {
 				writtenBlocks <- decoded
 				setErr(ErrCRC)
+				entry <- nil
 				return
 			}
 			entry <- decoded
@@ -1048,15 +1059,17 @@ func (r *Reader) ReadByte() (byte, error) {
 }
 
 // SkippableCB will register a callback for chunks with the specified ID.
-// ID must be a Reserved skippable chunks ID, 0x80-0xfe (inclusive).
+// ID must be a Reserved skippable chunks ID, 0x80-0xfd (inclusive).
 // For each chunk with the ID, the callback is called with the content.
 // Any returned non-nil error will abort decompression.
 // Only one callback per ID is supported, latest sent will be used.
 // Sending a nil function will disable previous callbacks.
+// You can peek the stream, triggering the callback, by doing a Read with a 0
+// byte buffer.
 func (r *Reader) SkippableCB(id uint8, fn func(r io.Reader) error) error {
-	if id < 0x80 || id > chunkTypePadding {
+	if id < 0x80 || id >= chunkTypePadding {
 		return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfe (inclusive)")
 	}
-	r.skippableCB[id] = fn
+	r.skippableCB[id-0x80] = fn
 	return nil
 }
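
To make the callback fix concrete, here is a hedged usage sketch (not from the PR) that writes a stream containing a skippable chunk and registers a callback for it on the read side; after this change the handler is stored at `skippableCB[id-0x80]` instead of indexing past the end of the array:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/klauspost/compress/s2"
)

func main() {
	var buf bytes.Buffer

	// Write some data, then append a user-defined skippable chunk with ID 0x80.
	w := s2.NewWriter(&buf)
	if _, err := w.Write([]byte("hello world")); err != nil {
		log.Fatal(err)
	}
	if err := w.AddSkippableBlock(0x80, []byte("metadata")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	// Register a callback for chunks with ID 0x80.
	r := s2.NewReader(&buf, s2.ReaderSkippableCB(0x80, func(sr io.Reader) error {
		data, err := io.ReadAll(sr)
		fmt.Printf("skippable chunk: %q\n", data)
		return err
	}))

	// Draining the stream (or, per the updated docs, a Read with a zero-length
	// buffer) triggers the callback when the skippable chunk is reached.
	if _, err := io.Copy(io.Discard, r); err != nil {
		log.Fatal(err)
	}
}
```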

View File

@@ -37,6 +37,8 @@ package s2
 import (
 	"bytes"
 	"hash/crc32"
+
+	"github.com/klauspost/compress/internal/race"
 )
 
 /*
@@ -112,6 +114,8 @@ var crcTable = crc32.MakeTable(crc32.Castagnoli)
 // crc implements the checksum specified in section 3 of
 // https://github.com/google/snappy/blob/master/framing_format.txt
 func crc(b []byte) uint32 {
+	race.ReadSlice(b)
+
 	c := crc32.Update(0, crcTable, b)
 	return c>>15 | c<<17 + 0xa282ead8
 }

View File

@@ -13,6 +13,8 @@ import (
 	"io"
 	"runtime"
 	"sync"
+
+	"github.com/klauspost/compress/internal/race"
 )
 
 const (
@@ -271,7 +273,7 @@ func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) {
 		return fmt.Errorf("skippable block excessed maximum size")
 	}
 	var header [4]byte
-	chunkLen := 4 + len(data)
+	chunkLen := len(data)
 	header[0] = id
 	header[1] = uint8(chunkLen >> 0)
 	header[2] = uint8(chunkLen >> 8)
@@ -282,7 +284,7 @@ func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) {
 		if err = w.err(err); err != nil {
 			return err
 		}
-		if n != len(data) {
+		if n != len(b) {
 			return w.err(io.ErrShortWrite)
 		}
 		w.written += int64(n)
@@ -303,9 +305,7 @@ func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) {
 		if err := write(header[:]); err != nil {
 			return err
 		}
-		if err := write(data); err != nil {
-			return err
-		}
+		return write(data)
 	}
 
 	// Create output...
@@ -385,6 +385,8 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) {
 		buf = buf[len(uncompressed):]
 		// Get an output buffer.
 		obuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen]
+		race.WriteSlice(obuf)
+
 		output := make(chan result)
 		// Queue output now, so we keep order.
 		w.output <- output
@@ -393,6 +395,8 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) {
 		}
 		w.uncompWritten += int64(len(uncompressed))
 		go func() {
+			race.ReadSlice(uncompressed)
+
 			checksum := crc(uncompressed)
 
 			// Set to uncompressed.

View File

@@ -1365,60 +1365,6 @@ THE SOFTWARE.
 ================================================================
-
-github.com/sirupsen/logrus
-https://github.com/sirupsen/logrus
-----------------------------------------------------------------
-The MIT License (MIT)
-
-Copyright (c) 2014 Simon Eskildsen
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-================================================================
-
-github.com/stretchr/testify
-https://github.com/stretchr/testify
-----------------------------------------------------------------
-MIT License
-
-Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-================================================================
-
 golang.org/x/crypto
 https://golang.org/x/crypto
 ----------------------------------------------------------------
@@ -1748,59 +1694,3 @@ third-party archives.
 ================================================================
-
-gopkg.in/yaml.v3
-https://gopkg.in/yaml.v3
-----------------------------------------------------------------
-
-This project is covered by two different licenses: MIT and Apache.
-
-#### MIT License ####
-
-The following files were ported to Go from C files of libyaml, and thus
-are still covered by their original MIT license, with the additional
-copyright staring in 2011 when the project was ported over:
-
-    apic.go emitterc.go parserc.go readerc.go scannerc.go
-    writerc.go yamlh.go yamlprivateh.go
-
-Copyright (c) 2006-2010 Kirill Simonov
-Copyright (c) 2006-2011 Kirill Simonov
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-### Apache License ###
-
-All the remaining project files are covered by the Apache license:
-
-Copyright (c) 2011-2019 Canonical Ltd
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.

View File

@@ -119,7 +119,7 @@ func (opts CopyDestOptions) Marshal(header http.Header) {
 	if opts.ReplaceMetadata {
 		header.Set("x-amz-metadata-directive", replaceDirective)
 		for k, v := range filterCustomMeta(opts.UserMetadata) {
-			if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) {
+			if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) || isValidReplicationEncryptionHeader(k) {
 				header.Set(k, v)
 			} else {
 				header.Set("x-amz-meta-"+k, v)

View File

@@ -212,7 +212,7 @@ func (opts PutObjectOptions) Header() (header http.Header) {
 	}
 	for k, v := range opts.UserMetadata {
-		if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) {
+		if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) || isValidReplicationEncryptionHeader(k) {
 			header.Set(k, v)
 		} else {
 			header.Set("x-amz-meta-"+k, v)
@@ -230,7 +230,7 @@
 // validate() checks if the UserMetadata map has standard headers or and raises an error if so.
 func (opts PutObjectOptions) validate() (err error) {
 	for k, v := range opts.UserMetadata {
-		if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) {
+		if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) || isValidReplicationEncryptionHeader(k) {
 			return errInvalidArgument(k + " unsupported user defined metadata name")
 		}
 		if !httpguts.ValidHeaderFieldValue(v) {

View File

@@ -1,6 +1,6 @@
 /*
  * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2023 MinIO, Inc.
+ * Copyright 2015-2024 MinIO, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -80,6 +80,8 @@ type Client struct {
 	// S3 specific accelerated endpoint.
 	s3AccelerateEndpoint string
+	// S3 dual-stack endpoints are enabled by default.
+	s3DualstackEnabled bool
 
 	// Region endpoint
 	region string
@@ -127,7 +129,7 @@ type Options struct {
 // Global constants.
 const (
 	libraryName    = "minio-go"
-	libraryVersion = "v7.0.67"
+	libraryVersion = "v7.0.69"
 )
 
 // User Agent should always following the below style.
@@ -158,9 +160,12 @@ func New(endpoint string, opts *Options) (*Client, error) {
 	if err != nil {
 		return nil, err
 	}
-	// If Amazon S3 set to signature v4.
 	if s3utils.IsAmazonEndpoint(*clnt.endpointURL) {
+		// If Amazon S3 set to signature v4.
 		clnt.overrideSignerType = credentials.SignatureV4
+		// Amazon S3 endpoints are resolved into dual-stack endpoints by default
+		// for backwards compatibility.
+		clnt.s3DualstackEnabled = true
 	}
 
 	return clnt, nil
@@ -330,6 +335,16 @@ func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
 	}
 }
 
+// SetS3EnableDualstack turns s3 dual-stack endpoints on or off for all requests.
+// The feature is only specific to S3 and is on by default. To read more about
+// Amazon S3 dual-stack endpoints visit -
+// https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html
+func (c *Client) SetS3EnableDualstack(enabled bool) {
+	if s3utils.IsAmazonEndpoint(*c.endpointURL) {
+		c.s3DualstackEnabled = enabled
+	}
+}
+
 // Hash materials provides relevant initialized hash algo writers
 // based on the expected signature type.
 //
@@ -926,7 +941,7 @@ func (c *Client) makeTargetURL(bucketName, objectName, bucketLocation string, is
 		// Do not change the host if the endpoint URL is a FIPS S3 endpoint or a S3 PrivateLink interface endpoint
 		if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) && !s3utils.IsAmazonPrivateLinkEndpoint(*c.endpointURL) {
 			// Fetch new host based on the bucket location.
-			host = getS3Endpoint(bucketLocation)
+			host = getS3Endpoint(bucketLocation, c.s3DualstackEnabled)
 		}
 	}
 }
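
As a usage note, a minimal sketch (placeholder credentials, not from the PR) of how a caller might turn the new dual-stack toggle off; per the diff, dual-stack stays on by default and the setter only takes effect when the endpoint is an Amazon one:

```go
package main

import (
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("s3.amazonaws.com", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Force plain (non dual-stack) regional S3 endpoints for this client.
	client.SetS3EnableDualstack(false)
	_ = client
}
```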

File diff suppressed because it is too large

View File

@@ -237,6 +237,7 @@ func (m *STSAssumeRole) Retrieve() (Value, error) {
 		AccessKeyID:     a.Result.Credentials.AccessKey,
 		SecretAccessKey: a.Result.Credentials.SecretKey,
 		SessionToken:    a.Result.Credentials.SessionToken,
+		Expiration:      a.Result.Credentials.Expiration,
 		SignerType:      SignatureV4,
 	}, nil
 }

View File

@@ -30,17 +30,20 @@ const (
 	defaultExpiryWindow = 0.8
 )
 
-// A Value is the AWS credentials value for individual credential fields.
+// A Value is the S3 credentials value for individual credential fields.
 type Value struct {
-	// AWS Access key ID
+	// S3 Access key ID
 	AccessKeyID string
 
-	// AWS Secret Access Key
+	// S3 Secret Access Key
 	SecretAccessKey string
 
-	// AWS Session Token
+	// S3 Session Token
 	SessionToken string
 
+	// Expiration of this credentials - null means no expiration associated
+	Expiration time.Time
+
 	// Signature Type.
 	SignerType SignatureType
 }

View File

@@ -129,6 +129,7 @@ func (p *FileAWSCredentials) Retrieve() (Value, error) {
 			AccessKeyID:     externalProcessCredentials.AccessKeyID,
 			SecretAccessKey: externalProcessCredentials.SecretAccessKey,
 			SessionToken:    externalProcessCredentials.SessionToken,
+			Expiration:      externalProcessCredentials.Expiration,
 			SignerType:      SignatureV4,
 		}, nil
 	}

View File

@@ -61,6 +61,7 @@ type IAM struct {
 	// Support for container authorization token https://docs.aws.amazon.com/sdkref/latest/guide/feature-container-credentials.html
 	Container struct {
 		AuthorizationToken     string
+		AuthorizationTokenFile string
 		CredentialsFullURI     string
 		CredentialsRelativeURI string
 	}
@@ -105,6 +106,11 @@ func (m *IAM) Retrieve() (Value, error) {
 		token = m.Container.AuthorizationToken
 	}
 
+	tokenFile := os.Getenv("AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE")
+	if tokenFile == "" {
+		tokenFile = m.Container.AuthorizationToken
+	}
+
 	relativeURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")
 	if relativeURI == "" {
 		relativeURI = m.Container.CredentialsRelativeURI
@@ -181,6 +187,10 @@ func (m *IAM) Retrieve() (Value, error) {
 		roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)
 
+	case tokenFile != "" && fullURI != "":
+		endpoint = fullURI
+		roleCreds, err = getEKSPodIdentityCredentials(m.Client, endpoint, tokenFile)
+
 	case fullURI != "":
 		if len(endpoint) == 0 {
 			endpoint = fullURI
@@ -209,6 +219,7 @@ func (m *IAM) Retrieve() (Value, error) {
 		AccessKeyID:     roleCreds.AccessKeyID,
 		SecretAccessKey: roleCreds.SecretAccessKey,
 		SessionToken:    roleCreds.Token,
+		Expiration:      roleCreds.Expiration,
 		SignerType:      SignatureV4,
 	}, nil
 }
@@ -304,6 +315,18 @@ func getEcsTaskCredentials(client *http.Client, endpoint, token string) (ec2Role
 	return respCreds, nil
 }
 
+func getEKSPodIdentityCredentials(client *http.Client, endpoint string, tokenFile string) (ec2RoleCredRespBody, error) {
+	if tokenFile != "" {
+		bytes, err := os.ReadFile(tokenFile)
+		if err != nil {
+			return ec2RoleCredRespBody{}, fmt.Errorf("getEKSPodIdentityCredentials: failed to read token file:%s", err)
+		}
+		token := string(bytes)
+		return getEcsTaskCredentials(client, endpoint, token)
+	}
+	return ec2RoleCredRespBody{}, fmt.Errorf("getEKSPodIdentityCredentials: no tokenFile found")
+}
+
 func fetchIMDSToken(client *http.Client, endpoint string) (string, error) {
 	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
 	defer cancel()
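
For orientation, the new branch fires when both a credentials URI and a token file are present, which is how EKS Pod Identity exposes credentials (it injects AWS_CONTAINER_CREDENTIALS_FULL_URI and AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE into the pod). A hedged sketch of a client that would pick this up through the IAM provider; the endpoint is a placeholder:

```go
package main

import (
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// NewIAM reads the container-credential environment variables set by the
	// EKS Pod Identity agent and, with this change, also honors the token file.
	client, err := minio.New("s3.us-east-1.amazonaws.com", &minio.Options{
		Creds:  credentials.NewIAM(""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}
```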

View File

@@ -177,6 +177,7 @@ func (m *STSClientGrants) Retrieve() (Value, error) {
 		AccessKeyID:     a.Result.Credentials.AccessKey,
 		SecretAccessKey: a.Result.Credentials.SecretKey,
 		SessionToken:    a.Result.Credentials.SessionToken,
+		Expiration:      a.Result.Credentials.Expiration,
 		SignerType:      SignatureV4,
 	}, nil
 }

View File

@@ -113,6 +113,7 @@ func (c *CustomTokenIdentity) Retrieve() (value Value, err error) {
 		AccessKeyID:     cr.AccessKey,
 		SecretAccessKey: cr.SecretKey,
 		SessionToken:    cr.SessionToken,
+		Expiration:      cr.Expiration,
 		SignerType:      SignatureV4,
 	}, nil
 }

View File

@@ -184,6 +184,7 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) {
 		AccessKeyID:     cr.AccessKey,
 		SecretAccessKey: cr.SecretKey,
 		SessionToken:    cr.SessionToken,
+		Expiration:      cr.Expiration,
 		SignerType:      SignatureV4,
 	}, nil
 }

View File

@@ -188,6 +188,7 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) {
 		AccessKeyID:     response.Result.Credentials.AccessKey,
 		SecretAccessKey: response.Result.Credentials.SecretKey,
 		SessionToken:    response.Result.Credentials.SessionToken,
+		Expiration:      response.Result.Credentials.Expiration,
 		SignerType:      SignatureDefault,
 	}, nil
 }

View File

@@ -195,6 +195,7 @@ func (m *STSWebIdentity) Retrieve() (Value, error) {
 		AccessKeyID:     a.Result.Credentials.AccessKey,
 		SecretAccessKey: a.Result.Credentials.SecretKey,
 		SessionToken:    a.Result.Credentials.SessionToken,
+		Expiration:      a.Result.Credentials.Expiration,
 		SignerType:      SignatureV4,
 	}, nil
 }

View File

@@ -118,6 +118,7 @@ var retryableHTTPStatusCodes = map[int]struct{}{
 	http.StatusBadGateway:         {},
 	http.StatusServiceUnavailable: {},
 	http.StatusGatewayTimeout:     {},
+	520:                           {}, // It is used by Cloudflare as a catch-all response for when the origin server sends something unexpected.
 	// Add more HTTP status codes here.
 }

View File

@@ -1,6 +1,6 @@
 /*
  * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 MinIO, Inc.
+ * Copyright 2015-2024 MinIO, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -17,48 +17,155 @@
 
 package minio
 
+type awsS3Endpoint struct {
+	endpoint          string
+	dualstackEndpoint string
+}
+
 // awsS3EndpointMap Amazon S3 endpoint map.
-var awsS3EndpointMap = map[string]string{
-	"us-east-1":      "s3.dualstack.us-east-1.amazonaws.com",
-	"us-east-2":      "s3.dualstack.us-east-2.amazonaws.com",
-	"us-west-2":      "s3.dualstack.us-west-2.amazonaws.com",
-	"us-west-1":      "s3.dualstack.us-west-1.amazonaws.com",
-	"ca-central-1":   "s3.dualstack.ca-central-1.amazonaws.com",
-	"eu-west-1":      "s3.dualstack.eu-west-1.amazonaws.com",
-	"eu-west-2":      "s3.dualstack.eu-west-2.amazonaws.com",
-	"eu-west-3":      "s3.dualstack.eu-west-3.amazonaws.com",
-	"eu-central-1":   "s3.dualstack.eu-central-1.amazonaws.com",
-	"eu-central-2":   "s3.dualstack.eu-central-2.amazonaws.com",
-	"eu-north-1":     "s3.dualstack.eu-north-1.amazonaws.com",
-	"eu-south-1":     "s3.dualstack.eu-south-1.amazonaws.com",
-	"eu-south-2":     "s3.dualstack.eu-south-2.amazonaws.com",
-	"ap-east-1":      "s3.dualstack.ap-east-1.amazonaws.com",
-	"ap-south-1":     "s3.dualstack.ap-south-1.amazonaws.com",
-	"ap-south-2":     "s3.dualstack.ap-south-2.amazonaws.com",
-	"ap-southeast-1": "s3.dualstack.ap-southeast-1.amazonaws.com",
-	"ap-southeast-2": "s3.dualstack.ap-southeast-2.amazonaws.com",
-	"ap-northeast-1": "s3.dualstack.ap-northeast-1.amazonaws.com",
-	"ap-northeast-2": "s3.dualstack.ap-northeast-2.amazonaws.com",
-	"ap-northeast-3": "s3.dualstack.ap-northeast-3.amazonaws.com",
-	"af-south-1":     "s3.dualstack.af-south-1.amazonaws.com",
-	"me-central-1":   "s3.dualstack.me-central-1.amazonaws.com",
-	"me-south-1":     "s3.dualstack.me-south-1.amazonaws.com",
-	"sa-east-1":      "s3.dualstack.sa-east-1.amazonaws.com",
-	"us-gov-west-1":  "s3.dualstack.us-gov-west-1.amazonaws.com",
-	"us-gov-east-1":  "s3.dualstack.us-gov-east-1.amazonaws.com",
-	"cn-north-1":     "s3.dualstack.cn-north-1.amazonaws.com.cn",
-	"cn-northwest-1": "s3.dualstack.cn-northwest-1.amazonaws.com.cn",
-	"ap-southeast-3": "s3.dualstack.ap-southeast-3.amazonaws.com",
-	"ap-southeast-4": "s3.dualstack.ap-southeast-4.amazonaws.com",
-	"il-central-1":   "s3.dualstack.il-central-1.amazonaws.com",
+var awsS3EndpointMap = map[string]awsS3Endpoint{
+	"us-east-1": {
+		"s3.us-east-1.amazonaws.com",
+		"s3.dualstack.us-east-1.amazonaws.com",
+	},
+	"us-east-2": {
+		"s3.us-east-2.amazonaws.com",
+		"s3.dualstack.us-east-2.amazonaws.com",
+	},
+	"us-west-2": {
+		"s3.us-west-2.amazonaws.com",
+		"s3.dualstack.us-west-2.amazonaws.com",
+	},
+	"us-west-1": {
+		"s3.us-west-1.amazonaws.com",
+		"s3.dualstack.us-west-1.amazonaws.com",
+	},
+	"ca-central-1": {
+		"s3.ca-central-1.amazonaws.com",
+		"s3.dualstack.ca-central-1.amazonaws.com",
+	},
+	"eu-west-1": {
+		"s3.eu-west-1.amazonaws.com",
+		"s3.dualstack.eu-west-1.amazonaws.com",
+	},
+	"eu-west-2": {
+		"s3.eu-west-2.amazonaws.com",
+		"s3.dualstack.eu-west-2.amazonaws.com",
+	},
+	"eu-west-3": {
+		"s3.eu-west-3.amazonaws.com",
+		"s3.dualstack.eu-west-3.amazonaws.com",
+	},
+	"eu-central-1": {
+		"s3.eu-central-1.amazonaws.com",
+		"s3.dualstack.eu-central-1.amazonaws.com",
+	},
+	"eu-central-2": {
+		"s3.eu-central-2.amazonaws.com",
+		"s3.dualstack.eu-central-2.amazonaws.com",
+	},
+	"eu-north-1": {
+		"s3.eu-north-1.amazonaws.com",
+		"s3.dualstack.eu-north-1.amazonaws.com",
+	},
+	"eu-south-1": {
+		"s3.eu-south-1.amazonaws.com",
+		"s3.dualstack.eu-south-1.amazonaws.com",
+	},
+	"eu-south-2": {
+		"s3.eu-south-2.amazonaws.com",
+		"s3.dualstack.eu-south-2.amazonaws.com",
+	},
+	"ap-east-1": {
+		"s3.ap-east-1.amazonaws.com",
+		"s3.dualstack.ap-east-1.amazonaws.com",
+	},
+	"ap-south-1": {
+		"s3.ap-south-1.amazonaws.com",
+		"s3.dualstack.ap-south-1.amazonaws.com",
+	},
+	"ap-south-2": {
+		"s3.ap-south-2.amazonaws.com",
+		"s3.dualstack.ap-south-2.amazonaws.com",
+	},
+	"ap-southeast-1": {
+		"s3.ap-southeast-1.amazonaws.com",
+		"s3.dualstack.ap-southeast-1.amazonaws.com",
+	},
+	"ap-southeast-2": {
+		"s3.ap-southeast-2.amazonaws.com",
+		"s3.dualstack.ap-southeast-2.amazonaws.com",
+	},
+	"ap-southeast-3": {
+		"s3.ap-southeast-3.amazonaws.com",
+		"s3.dualstack.ap-southeast-3.amazonaws.com",
+	},
+	"ap-southeast-4": {
+		"s3.ap-southeast-4.amazonaws.com",
+		"s3.dualstack.ap-southeast-4.amazonaws.com",
+	},
+	"ap-northeast-1": {
+		"s3.ap-northeast-1.amazonaws.com",
+		"s3.dualstack.ap-northeast-1.amazonaws.com",
+	},
+	"ap-northeast-2": {
+		"s3.ap-northeast-2.amazonaws.com",
+		"s3.dualstack.ap-northeast-2.amazonaws.com",
+	},
+	"ap-northeast-3": {
+		"s3.ap-northeast-3.amazonaws.com",
+		"s3.dualstack.ap-northeast-3.amazonaws.com",
+	},
+	"af-south-1": {
+		"s3.af-south-1.amazonaws.com",
+		"s3.dualstack.af-south-1.amazonaws.com",
+	},
+	"me-central-1": {
+		"s3.me-central-1.amazonaws.com",
+		"s3.dualstack.me-central-1.amazonaws.com",
+	},
+	"me-south-1": {
+		"s3.me-south-1.amazonaws.com",
+		"s3.dualstack.me-south-1.amazonaws.com",
+	},
+	"sa-east-1": {
+		"s3.sa-east-1.amazonaws.com",
+		"s3.dualstack.sa-east-1.amazonaws.com",
+	},
+	"us-gov-west-1": {
+		"s3.us-gov-west-1.amazonaws.com",
+		"s3.dualstack.us-gov-west-1.amazonaws.com",
+	},
+	"us-gov-east-1": {
+		"s3.us-gov-east-1.amazonaws.com",
+		"s3.dualstack.us-gov-east-1.amazonaws.com",
+	},
+	"cn-north-1": {
+		"s3.cn-north-1.amazonaws.com.cn",
+		"s3.dualstack.cn-north-1.amazonaws.com.cn",
+	},
+	"cn-northwest-1": {
+		"s3.cn-northwest-1.amazonaws.com.cn",
+		"s3.dualstack.cn-northwest-1.amazonaws.com.cn",
+	},
+	"il-central-1": {
+		"s3.il-central-1.amazonaws.com",
+		"s3.dualstack.il-central-1.amazonaws.com",
+	},
 }
 
 // getS3Endpoint get Amazon S3 endpoint based on the bucket location.
-func getS3Endpoint(bucketLocation string) (s3Endpoint string) {
+func getS3Endpoint(bucketLocation string, useDualstack bool) (endpoint string) {
 	s3Endpoint, ok := awsS3EndpointMap[bucketLocation]
 	if !ok {
-		// Default to 's3.dualstack.us-east-1.amazonaws.com' endpoint.
-		s3Endpoint = "s3.dualstack.us-east-1.amazonaws.com"
+		// Default to 's3.us-east-1.amazonaws.com' endpoint.
+		if useDualstack {
+			return "s3.dualstack.us-east-1.amazonaws.com"
+		}
+		return "s3.us-east-1.amazonaws.com"
 	}
-	return s3Endpoint
+	if useDualstack {
+		return s3Endpoint.dualstackEndpoint
+	}
+	return s3Endpoint.endpoint
 }

View File

@@ -512,6 +512,21 @@ func isAmzHeader(headerKey string) bool {
 	return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) || strings.HasPrefix(key, "x-amz-checksum-")
 }
 
+var supportedReplicationEncryptionHeaders = map[string]bool{
+	"x-minio-replication-server-side-encryption-sealed-key":     true,
+	"x-minio-replication-server-side-encryption-seal-algorithm": true,
+	"x-minio-replication-server-side-encryption-iv":             true,
+	"x-minio-replication-encrypted-multipart":                   true,
+	"x-minio-replication-actual-object-size":                    true,
+	// Add more supported headers here.
+	// Must be lower case.
+}
+
+// isValidReplicationEncryptionHeader returns true if header is one of valid replication encryption headers
+func isValidReplicationEncryptionHeader(headerKey string) bool {
+	return supportedReplicationEncryptionHeaders[strings.ToLower(headerKey)]
+}
+
 // supportedQueryValues is a list of query strings that can be passed in when using GetObject.
 var supportedQueryValues = map[string]bool{
 	"attributes": true,

vendor/modules.txt (vendored, 7 changes)
View File

@@ -439,10 +439,11 @@ github.com/json-iterator/go
 # github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
 ## explicit
 github.com/kballard/go-shellquote
-# github.com/klauspost/compress v1.17.4
+# github.com/klauspost/compress v1.17.6
 ## explicit; go 1.19
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/gzip
+github.com/klauspost/compress/internal/race
 github.com/klauspost/compress/s2
 github.com/klauspost/compress/snappy
 github.com/klauspost/compress/zlib
@@ -483,8 +484,8 @@ github.com/miekg/dns
 # github.com/minio/md5-simd v1.1.2
 ## explicit; go 1.14
 github.com/minio/md5-simd
-# github.com/minio/minio-go/v7 v7.0.67
-## explicit; go 1.17
+# github.com/minio/minio-go/v7 v7.0.69
+## explicit; go 1.21
 github.com/minio/minio-go/v7
 github.com/minio/minio-go/v7/pkg/credentials
 github.com/minio/minio-go/v7/pkg/encrypt