[bugfix] s3 media uploaded without content-type (#3353)
* update the go-storage dependency; for S3Storage, manually call PutObject() so we can set the content-type
* update calls to PutFile() to include the contentType
commit 53ee6aef08
parent b0fbc327f0
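The core of the change: media uploaded to S3-backed storage now carries an explicit content-type. As a rough illustration (a sketch, not GoToSocial code; endpoint, bucket, object key and credentials are placeholders), this is how a content-type is attached to an S3 upload with minio-go:

package main

import (
	"context"
	"log"
	"os"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("s3.example.org", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS_KEY", "SECRET_KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	f, err := os.Open("image.webp")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	stat, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}

	// Without ContentType the object is stored with a generic
	// content-type, which is what this bugfix addresses for media
	// served straight from the bucket (e.g. via CDN redirect).
	_, err = client.PutObject(context.Background(), "my-bucket", "media/image.webp",
		f, stat.Size(), minio.PutObjectOptions{ContentType: "image/webp"})
	if err != nil {
		log.Fatal(err)
	}
}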
go.mod (4 lines changed)

@@ -21,7 +21,7 @@ require (
 	codeberg.org/gruf/go-mutexes v1.5.1
 	codeberg.org/gruf/go-runners v1.6.2
 	codeberg.org/gruf/go-sched v1.2.3
-	codeberg.org/gruf/go-storage v0.1.2
+	codeberg.org/gruf/go-storage v0.2.0
 	codeberg.org/gruf/go-structr v0.8.9
 	codeberg.org/superseriousbusiness/exif-terminator v0.9.0
 	github.com/DmitriyVTitov/size v1.5.0
@@ -42,7 +42,7 @@ require (
 	github.com/k3a/html2text v1.2.1
 	github.com/microcosm-cc/bluemonday v1.0.27
 	github.com/miekg/dns v1.1.62
-	github.com/minio/minio-go/v7 v7.0.76
+	github.com/minio/minio-go/v7 v7.0.77
 	github.com/mitchellh/mapstructure v1.5.0
 	github.com/ncruces/go-sqlite3 v0.18.3
 	github.com/oklog/ulid v1.3.1
go.sum (8 lines changed)

@@ -76,8 +76,8 @@ codeberg.org/gruf/go-runners v1.6.2 h1:oQef9niahfHu/wch14xNxlRMP8i+ABXH1Cb9PzZ4o
 codeberg.org/gruf/go-runners v1.6.2/go.mod h1:Tq5PrZ/m/rBXbLZz0u5if+yP3nG5Sf6S8O/GnyEePeQ=
 codeberg.org/gruf/go-sched v1.2.3 h1:H5ViDxxzOBR3uIyGBCf0eH8b1L8wMybOXcdtUUTXZHk=
 codeberg.org/gruf/go-sched v1.2.3/go.mod h1:vT9uB6KWFIIwnG9vcPY2a0alYNoqdL1mSzRM8I+PK7A=
-codeberg.org/gruf/go-storage v0.1.2 h1:dIOVOKq1CJpRmuhbB8Zok3mmo8V6VV/nX5GLIm6hywA=
-codeberg.org/gruf/go-storage v0.1.2/go.mod h1:LRDpFHqRJi0f+35c3ltBH2e/pGfwY5dGlNlgCJ/R1DA=
+codeberg.org/gruf/go-storage v0.2.0 h1:mKj3Lx6AavEkuXXtxqPhdq+akW9YwrnP16yQBF7K5ZI=
+codeberg.org/gruf/go-storage v0.2.0/go.mod h1:o3GzMDE5QNUaRnm/daUzFqvuAaC4utlgXDXYO79sWKU=
 codeberg.org/gruf/go-structr v0.8.9 h1:OyiSspWYCeJOm356fFPd+bDRumPrard2VAUXAPqZiJ0=
 codeberg.org/gruf/go-structr v0.8.9/go.mod h1:zkoXVrAnKosh8VFAsbP/Hhs8FmLBjbVVy5w/Ngm8ApM=
 codeberg.org/superseriousbusiness/exif-terminator v0.9.0 h1:/EfyGI6HIrbkhFwgXGSjZ9o1kr/+k8v4mKdfXTH02Go=
@@ -419,8 +419,8 @@ github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ=
 github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
 github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.76 h1:9nxHH2XDai61cT/EFhyIw/wW4vJfpPNvl7lSFpRt+Ng=
-github.com/minio/minio-go/v7 v7.0.76/go.mod h1:AVM3IUN6WwKzmwBxVdjzhH8xq+f57JSbbvzqvUzR6eg=
+github.com/minio/minio-go/v7 v7.0.77 h1:GaGghJRg9nwDVlNbwYjSDJT1rqltQkBFDsypWX1v3Bw=
+github.com/minio/minio-go/v7 v7.0.77/go.mod h1:AVM3IUN6WwKzmwBxVdjzhH8xq+f57JSbbvzqvUzR6eg=
 github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
 github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
@@ -216,10 +216,18 @@ func (p *ProcessingEmoji) store(ctx context.Context) error {
 		"png",
 	)

+	// Get mimetype for the file container
+	// type, falling back to generic data.
+	p.emoji.ImageContentType = getMimeType(ext)
+
+	// Set the known emoji static content type.
+	p.emoji.ImageStaticContentType = "image/png"
+
 	// Copy temporary file into storage at path.
 	filesz, err := p.mgr.state.Storage.PutFile(ctx,
 		p.emoji.ImagePath,
 		temppath,
+		p.emoji.ImageContentType,
 	)
 	if err != nil {
 		return gtserror.Newf("error writing emoji to storage: %w", err)
@@ -229,6 +237,7 @@ func (p *ProcessingEmoji) store(ctx context.Context) error {
 	staticsz, err := p.mgr.state.Storage.PutFile(ctx,
 		p.emoji.ImageStaticPath,
 		staticpath,
+		p.emoji.ImageStaticContentType,
 	)
 	if err != nil {
 		return gtserror.Newf("error writing static to storage: %w", err)
@@ -256,13 +265,6 @@ func (p *ProcessingEmoji) store(ctx context.Context) error {
 		"png",
 	)

-	// Get mimetype for the file container
-	// type, falling back to generic data.
-	p.emoji.ImageContentType = getMimeType(ext)
-
-	// Set the known emoji static content type.
-	p.emoji.ImageStaticContentType = "image/png"
-
 	// We can now consider this cached.
 	p.emoji.Cached = util.Ptr(true)
@@ -261,10 +261,15 @@ func (p *ProcessingMedia) store(ctx context.Context) error {
 		ext,
 	)

+	// Get mimetype for the file container
+	// type, falling back to generic data.
+	p.media.File.ContentType = getMimeType(ext)
+
 	// Copy temporary file into storage at path.
 	filesz, err := p.mgr.state.Storage.PutFile(ctx,
 		p.media.File.Path,
 		temppath,
+		p.media.File.ContentType,
 	)
 	if err != nil {
 		return gtserror.Newf("error writing media to storage: %w", err)
@@ -286,10 +291,14 @@ func (p *ProcessingMedia) store(ctx context.Context) error {
 		thumbExt,
 	)

+	// Determine thumbnail content-type from thumb ext.
+	p.media.Thumbnail.ContentType = getMimeType(thumbExt)
+
 	// Copy thumbnail file into storage at path.
 	thumbsz, err := p.mgr.state.Storage.PutFile(ctx,
 		p.media.Thumbnail.Path,
 		thumbpath,
+		p.media.Thumbnail.ContentType,
 	)
 	if err != nil {
 		return gtserror.Newf("error writing thumb to storage: %w", err)
@@ -298,9 +307,6 @@ func (p *ProcessingMedia) store(ctx context.Context) error {
 	// Set final determined thumbnail size.
 	p.media.Thumbnail.FileSize = int(thumbsz)

-	// Determine thumbnail content-type from thumb ext.
-	p.media.Thumbnail.ContentType = getMimeType(thumbExt)
-
 	// Generate a media attachment thumbnail URL.
 	p.media.Thumbnail.URL = uris.URIForAttachment(
 		p.media.AccountID,
@@ -320,10 +326,6 @@ func (p *ProcessingMedia) store(ctx context.Context) error {
 		ext,
 	)

-	// Get mimetype for the file container
-	// type, falling back to generic data.
-	p.media.File.ContentType = getMimeType(ext)
-
 	// We can now consider this cached.
 	p.media.Cached = util.Ptr(true)
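Both the emoji and media paths above derive the stored content-type with getMimeType(ext), which is not part of this diff. A minimal sketch of the described behaviour ("get mimetype for the file container type, falling back to generic data") — the name and details here are assumptions, not the project's actual helper:

package main

import (
	"fmt"
	"mime"
)

// getMimeTypeSketch maps a file extension (without the leading dot) to a
// mime type, falling back to a generic binary content-type. Illustrative
// stand-in for the getMimeType helper referenced in the diff above.
func getMimeTypeSketch(ext string) string {
	if t := mime.TypeByExtension("." + ext); t != "" {
		return t
	}
	return "application/octet-stream"
}

func main() {
	fmt.Println(getMimeTypeSketch("png"))  // image/png
	fmt.Println(getMimeTypeSketch("blob")) // application/octet-stream
}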
@@ -97,23 +97,39 @@ func (d *Driver) Put(ctx context.Context, key string, value []byte) (int, error)
 	return d.Storage.WriteBytes(ctx, key, value)
 }

 // PutStream writes the bytes from supplied reader at key in the storage
 func (d *Driver) PutStream(ctx context.Context, key string, r io.Reader) (int64, error) {
 	return d.Storage.WriteStream(ctx, key, r)
 }

-// PutFile moves the contents of file at path, to storage.Driver{} under given key.
-func (d *Driver) PutFile(ctx context.Context, key string, filepath string) (int64, error) {
+// PutFile moves the contents of file at path, to storage.Driver{} under given key (with content-type if supported).
+func (d *Driver) PutFile(ctx context.Context, key, filepath, contentType string) (int64, error) {
 	// Open file at path for reading.
 	file, err := os.Open(filepath)
 	if err != nil {
 		return 0, gtserror.Newf("error opening file %s: %w", filepath, err)
 	}

-	// Write the file data to storage under key. Note
-	// that for disk.DiskStorage{} this should end up
-	// being a highly optimized Linux sendfile syscall.
-	sz, err := d.Storage.WriteStream(ctx, key, file)
+	var sz int64
+
+	switch d := d.Storage.(type) {
+	case *s3.S3Storage:
+		var info minio.UploadInfo
+
+		// For S3 storage, write the file but specifically pass in the
+		// content-type as an extra option. This handles the case of media
+		// being served via CDN redirect (where we don't handle content-type).
+		info, err = d.PutObject(ctx, key, file, minio.PutObjectOptions{
+			ContentType: contentType,
+		})
+
+		// Get size from
+		// uploaded info.
+		sz = info.Size
+
+	default:
+		// Write the file data to storage under key. Note
+		// that for disk.DiskStorage{} this should end up
+		// being a highly optimized Linux sendfile syscall.
+		sz, err = d.WriteStream(ctx, key, file)
+	}

 	// Wrap write error.
 	if err != nil {
 		err = gtserror.Newf("error writing file %s: %w", key, err)
 	}
@@ -305,11 +321,7 @@ func NewS3Storage() (*Driver, error) {
 			Creds:  credentials.NewStaticV4(access, secret, ""),
 			Secure: secure,
 		},
-		GetOpts:      minio.GetObjectOptions{},
-		PutOpts:      minio.PutObjectOptions{},
 		PutChunkSize: 5 * 1024 * 1024, // 5MiB
-		StatOpts:     minio.StatObjectOptions{},
-		RemoveOpts:   minio.RemoveObjectOptions{},
 		ListSize:     200,
 	})
 	if err != nil {
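The new Driver.PutFile dispatches on the concrete storage implementation so that only the S3 backend receives the content-type, while disk storage keeps the plain stream write. A small self-contained sketch of that dispatch pattern (the types below are illustrative stand-ins, not go-storage's):

package main

import (
	"context"
	"fmt"
)

// Storage is a simplified stand-in for the storage interface.
type Storage interface {
	Write(ctx context.Context, key string, data []byte) (int64, error)
}

type diskStorage struct{}

func (diskStorage) Write(ctx context.Context, key string, data []byte) (int64, error) {
	return int64(len(data)), nil // pretend write to disk
}

type s3Storage struct{}

func (s3Storage) Write(ctx context.Context, key string, data []byte) (int64, error) {
	return int64(len(data)), nil // pretend plain object write
}

// WriteObject is the extra capability only the S3 backend offers here.
func (s3Storage) WriteObject(ctx context.Context, key, contentType string, data []byte) (int64, error) {
	fmt.Println("uploading", key, "with content-type", contentType)
	return int64(len(data)), nil
}

// putFile special-cases the S3 backend so the content-type reaches the
// object store, and falls back to the generic write for everything else.
func putFile(ctx context.Context, st Storage, key, contentType string, data []byte) (int64, error) {
	switch st := st.(type) {
	case s3Storage:
		return st.WriteObject(ctx, key, contentType, data)
	default:
		return st.Write(ctx, key, data)
	}
}

func main() {
	ctx := context.Background()
	_, _ = putFile(ctx, s3Storage{}, "media/a.webp", "image/webp", []byte("..."))
	_, _ = putFile(ctx, diskStorage{}, "media/a.webp", "image/webp", []byte("..."))
}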
@@ -3,35 +3,9 @@ package s3
 import (
 	"strings"

 	"codeberg.org/gruf/go-storage"
 	"codeberg.org/gruf/go-storage/internal"
 	"github.com/minio/minio-go/v7"
 )

-// transformS3Error transforms an error returned from S3Storage underlying
-// minio.Core client, by wrapping where necessary with our own error types.
-func transformS3Error(err error) error {
-	// Cast this to a minio error response
-	ersp, ok := err.(minio.ErrorResponse)
-	if ok {
-		switch ersp.Code {
-		case "NoSuchKey":
-			return internal.WrapErr(err, storage.ErrNotFound)
-		case "Conflict":
-			return internal.WrapErr(err, storage.ErrAlreadyExists)
-		default:
-			return err
-		}
-	}
-
-	// Check if error has an invalid object name prefix
-	if strings.HasPrefix(err.Error(), "Object name ") {
-		return internal.WrapErr(err, storage.ErrInvalidKey)
-	}
-
-	return err
-}
-
 func isNotFoundError(err error) bool {
 	errRsp, ok := err.(minio.ErrorResponse)
 	return ok && errRsp.Code == "NoSuchKey"
@@ -5,6 +5,7 @@ import (
 	"context"
 	"errors"
 	"io"
+	"net/http"

 	"codeberg.org/gruf/go-storage"
 	"codeberg.org/gruf/go-storage/internal"
@@ -34,12 +35,7 @@ func DefaultConfig() Config {
 // immutable default configuration.
 var defaultConfig = Config{
 	CoreOpts:     minio.Options{},
-	GetOpts:      minio.GetObjectOptions{},
-	PutOpts:      minio.PutObjectOptions{},
-	PutChunkOpts: minio.PutObjectPartOptions{},
 	PutChunkSize: 4 * 1024 * 1024, // 4MiB
-	StatOpts:     minio.StatObjectOptions{},
-	RemoveOpts:   minio.RemoveObjectOptions{},
 	ListSize:     200,
 }

@@ -50,31 +46,11 @@ type Config struct {
 	// passed during initialization.
 	CoreOpts minio.Options

-	// GetOpts are S3 client options
-	// passed during .Read___() calls.
-	GetOpts minio.GetObjectOptions
-
-	// PutOpts are S3 client options
-	// passed during .Write___() calls.
-	PutOpts minio.PutObjectOptions
-
 	// PutChunkSize is the chunk size (in bytes)
 	// to use when sending a byte stream reader
 	// of unknown size as a multi-part object.
 	PutChunkSize int64

-	// PutChunkOpts are S3 client options
-	// passed during chunked .Write___() calls.
-	PutChunkOpts minio.PutObjectPartOptions
-
-	// StatOpts are S3 client options
-	// passed during .Stat() calls.
-	StatOpts minio.StatObjectOptions
-
-	// RemoveOpts are S3 client options
-	// passed during .Remove() calls.
-	RemoveOpts minio.RemoveObjectOptions
-
 	// ListSize determines how many items
 	// to include in each list request, made
 	// during calls to .WalkKeys().
@@ -103,12 +79,8 @@ func getS3Config(cfg *Config) Config {

 	return Config{
 		CoreOpts:     cfg.CoreOpts,
-		GetOpts:      cfg.GetOpts,
-		PutOpts:      cfg.PutOpts,
 		PutChunkSize: cfg.PutChunkSize,
 		ListSize:     cfg.ListSize,
-		StatOpts:     cfg.StatOpts,
-		RemoveOpts:   cfg.RemoveOpts,
 	}
 }
@@ -183,36 +155,50 @@ func (st *S3Storage) ReadBytes(ctx context.Context, key string) ([]byte, error)

 // ReadStream: implements Storage.ReadStream().
 func (st *S3Storage) ReadStream(ctx context.Context, key string) (io.ReadCloser, error) {
-	// Fetch object reader from S3 bucket
-	rc, _, _, err := st.client.GetObject(
+	rc, _, _, err := st.GetObject(ctx, key, minio.GetObjectOptions{})
+	return rc, err
+}
+
+// GetObject wraps minio.Core{}.GetObject() to handle wrapping with our own storage library error types.
+func (st *S3Storage) GetObject(ctx context.Context, key string, opts minio.GetObjectOptions) (io.ReadCloser, minio.ObjectInfo, http.Header, error) {
+
+	// Query bucket for object data and info.
+	rc, info, hdr, err := st.client.GetObject(
 		ctx,
 		st.bucket,
 		key,
-		st.config.GetOpts,
+		opts,
 	)
 	if err != nil {

 		if isNotFoundError(err) {
 			// Wrap not found errors as our not found type.
 			err = internal.WrapErr(err, storage.ErrNotFound)
-		} else if !isObjectNameError(err) {
+		} else if isObjectNameError(err) {
 			// Wrap object name errors as our invalid key type.
 			err = internal.WrapErr(err, storage.ErrInvalidKey)
 		}
-
-		return nil, transformS3Error(err)
 	}
-	return rc, nil
+
+	return rc, info, hdr, err
 }

 // WriteBytes: implements Storage.WriteBytes().
 func (st *S3Storage) WriteBytes(ctx context.Context, key string, value []byte) (int, error) {
-	n, err := st.WriteStream(ctx, key, bytes.NewReader(value))
-	return int(n), err
+	info, err := st.PutObject(ctx, key, bytes.NewReader(value), minio.PutObjectOptions{})
+	return int(info.Size), err
 }

 // WriteStream: implements Storage.WriteStream().
 func (st *S3Storage) WriteStream(ctx context.Context, key string, r io.Reader) (int64, error) {
+	info, err := st.PutObject(ctx, key, r, minio.PutObjectOptions{})
+	return info.Size, err
+}
+
+// PutObject wraps minio.Core{}.PutObject() to handle wrapping with our own storage library error types, and in the case of an io.Reader
+// that does not implement ReaderSize{}, it will instead handle upload by using minio.Core{}.NewMultipartUpload() in chunks of PutChunkSize.
+func (st *S3Storage) PutObject(ctx context.Context, key string, r io.Reader, opts minio.PutObjectOptions) (minio.UploadInfo, error) {
 	if rs, ok := r.(ReaderSize); ok {
 		// This reader supports providing us the size of
 		// the encompassed data, allowing us to perform
@@ -225,22 +211,21 @@ func (st *S3Storage) WriteStream(ctx context.Context, key string, r io.Reader) (
 			rs.Size(),
 			"",
 			"",
-			st.config.PutOpts,
+			opts,
 		)
 		if err != nil {

 			if isConflictError(err) {
 				// Wrap conflict errors as our already exists type.
 				err = internal.WrapErr(err, storage.ErrAlreadyExists)
-			} else if !isObjectNameError(err) {
+			} else if isObjectNameError(err) {
 				// Wrap object name errors as our invalid key type.
 				err = internal.WrapErr(err, storage.ErrInvalidKey)
 			}
-
-			return 0, err
 		}

-		return info.Size, nil
+		return info, err
 	}

 	// Start a new multipart upload to get ID.
@@ -248,24 +233,24 @@ func (st *S3Storage) WriteStream(ctx context.Context, key string, r io.Reader) (
 		ctx,
 		st.bucket,
 		key,
-		st.config.PutOpts,
+		opts,
 	)
 	if err != nil {

 		if isConflictError(err) {
 			// Wrap conflict errors as our already exists type.
 			err = internal.WrapErr(err, storage.ErrAlreadyExists)
-		} else if !isObjectNameError(err) {
+		} else if isObjectNameError(err) {
 			// Wrap object name errors as our invalid key type.
 			err = internal.WrapErr(err, storage.ErrInvalidKey)
 		}

-		return 0, transformS3Error(err)
+		return minio.UploadInfo{}, err
 	}

 	var (
-		index = int(1) // parts index
 		total = int64(0)
+		index = int(1) // parts index
 		parts []minio.CompletePart
 		chunk = make([]byte, st.config.PutChunkSize)
 		rbuf  = bytes.NewReader(nil)
@@ -296,7 +281,7 @@ loop:

 		// All other errors.
 		default:
-			return 0, err
+			return minio.UploadInfo{}, err
 		}

 		// Reset byte reader.
@@ -311,10 +296,13 @@ loop:
 			index,
 			rbuf,
 			int64(n),
-			st.config.PutChunkOpts,
+			minio.PutObjectPartOptions{
+				SSE:                  opts.ServerSideEncryption,
+				DisableContentSha256: opts.DisableContentSha256,
+			},
 		)
 		if err != nil {
-			return 0, err
+			return minio.UploadInfo{}, err
 		}

 		// Append completed part to slice.
@@ -327,101 +315,104 @@ loop:
 			ChecksumSHA256: pt.ChecksumSHA256,
 		})

-		// Update total.
-		total += int64(n)
-
 		// Iterate.
 		index++
+
+		// Update total size.
+		total += pt.Size
 	}

 	// Complete this multi-part upload operation
-	_, err = st.client.CompleteMultipartUpload(
+	info, err := st.client.CompleteMultipartUpload(
 		ctx,
 		st.bucket,
 		key,
 		uploadID,
 		parts,
-		st.config.PutOpts,
+		opts,
 	)
 	if err != nil {
-		return 0, err
+		return minio.UploadInfo{}, err
 	}

-	return total, nil
+	// Set correct size.
+	info.Size = total
+	return info, nil
 }

 // Stat: implements Storage.Stat().
 func (st *S3Storage) Stat(ctx context.Context, key string) (*storage.Entry, error) {
-	// Query object in S3 bucket.
-	stat, err := st.client.StatObject(
+	info, err := st.StatObject(ctx, key, minio.StatObjectOptions{})
+	if err != nil {
+		if errors.Is(err, storage.ErrNotFound) {
+			err = nil // mask not-found errors
+		}
+		return nil, err
+	}
+	return &storage.Entry{
+		Key:  key,
+		Size: info.Size,
+	}, nil
+}
+
+// StatObject wraps minio.Core{}.StatObject() to handle wrapping with our own storage library error types.
+func (st *S3Storage) StatObject(ctx context.Context, key string, opts minio.StatObjectOptions) (minio.ObjectInfo, error) {
+
+	// Query bucket for object info.
+	info, err := st.client.StatObject(
 		ctx,
 		st.bucket,
 		key,
-		st.config.StatOpts,
+		opts,
 	)
 	if err != nil {

 		if isNotFoundError(err) {
-			// Ignore err return
-			// for not-found.
-			err = nil
-		} else if !isObjectNameError(err) {
+			// Wrap not found errors as our not found type.
+			err = internal.WrapErr(err, storage.ErrNotFound)
+		} else if isObjectNameError(err) {
 			// Wrap object name errors as our invalid key type.
 			err = internal.WrapErr(err, storage.ErrInvalidKey)
 		}
-
-		return nil, err
 	}

-	return &storage.Entry{
-		Key:  key,
-		Size: stat.Size,
-	}, nil
+	return info, err
 }

 // Remove: implements Storage.Remove().
 func (st *S3Storage) Remove(ctx context.Context, key string) error {
-	// Query object in S3 bucket.
-	_, err := st.client.StatObject(
-		ctx,
-		st.bucket,
-		key,
-		st.config.StatOpts,
-	)
+	_, err := st.StatObject(ctx, key, minio.StatObjectOptions{})
 	if err != nil {
-
-		if isNotFoundError(err) {
-			// Wrap not found errors as our not found type.
-			err = internal.WrapErr(err, storage.ErrNotFound)
-		} else if !isObjectNameError(err) {
-			// Wrap object name errors as our invalid key type.
-			err = internal.WrapErr(err, storage.ErrInvalidKey)
-		}
-
 		return err
 	}
+	return st.RemoveObject(ctx, key, minio.RemoveObjectOptions{})
+}
+
+// RemoveObject wraps minio.Core{}.RemoveObject() to handle wrapping with our own storage library error types.
+func (st *S3Storage) RemoveObject(ctx context.Context, key string, opts minio.RemoveObjectOptions) error {

 	// Remove object from S3 bucket
-	err = st.client.RemoveObject(
+	err := st.client.RemoveObject(
 		ctx,
 		st.bucket,
 		key,
-		st.config.RemoveOpts,
+		opts,
 	)

 	if err != nil {

 		if isNotFoundError(err) {
 			// Wrap not found errors as our not found type.
 			err = internal.WrapErr(err, storage.ErrNotFound)
-		} else if !isObjectNameError(err) {
+		} else if isObjectNameError(err) {
 			// Wrap object name errors as our invalid key type.
 			err = internal.WrapErr(err, storage.ErrInvalidKey)
 		}
-
-		return err
 	}

-	return nil
+	return err
 }

 // WalkKeys: implements Storage.WalkKeys().
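PutObject above picks its upload strategy from whether the reader can report its size: a single PutObject call when it can, the chunked NewMultipartUpload path when it cannot. The ReaderSize{} interface itself is not shown in this diff; a sketch with its shape assumed from how it is used here:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// readerSize mirrors how the diff uses the interface: an io.Reader that can
// also report the total size of its data. The exact go-storage definition
// may differ; this is an assumption for illustration.
type readerSize interface {
	io.Reader
	Size() int64
}

func describe(r io.Reader) {
	if rs, ok := r.(readerSize); ok {
		fmt.Printf("size known (%d bytes): single PutObject call\n", rs.Size())
		return
	}
	fmt.Println("size unknown: fall back to chunked multipart upload")
}

func main() {
	describe(bytes.NewReader([]byte("hello")))          // *bytes.Reader exposes Size() int64
	describe(io.LimitReader(strings.NewReader("x"), 1)) // plain reader, no Size()
}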
@@ -108,7 +108,9 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
 	if err != nil {
 		return UploadInfo{}, err
 	}

+	if opts.Checksum.IsSet() {
+		opts.AutoChecksum = opts.Checksum
+	}
 	withChecksum := c.trailingHeaderSupport
 	if withChecksum {
 		if opts.UserMetadata == nil {
@@ -304,6 +306,11 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
 		return UploadInfo{}, err
 	}

+	if opts.Checksum.IsSet() {
+		opts.AutoChecksum = opts.Checksum
+		opts.SendContentMd5 = false
+	}
+
 	if !opts.SendContentMd5 {
 		if opts.UserMetadata == nil {
 			opts.UserMetadata = make(map[string]string, 1)
@@ -463,7 +470,10 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
 	if err = s3utils.CheckValidObjectName(objectName); err != nil {
 		return UploadInfo{}, err
 	}

+	if opts.Checksum.IsSet() {
+		opts.SendContentMd5 = false
+		opts.AutoChecksum = opts.Checksum
+	}
 	if !opts.SendContentMd5 {
 		if opts.UserMetadata == nil {
 			opts.UserMetadata = make(map[string]string, 1)
@@ -555,7 +565,7 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
 		// Calculate md5sum.
 		customHeader := make(http.Header)
 		if !opts.SendContentMd5 {
-			// Add CRC32C instead.
+			// Add Checksum instead.
 			crc.Reset()
 			crc.Write(buf[:length])
 			cSum := crc.Sum(nil)
@@ -677,6 +687,9 @@ func (c *Client) putObject(ctx context.Context, bucketName, objectName string, r
 	if opts.SendContentMd5 && s3utils.IsGoogleEndpoint(*c.endpointURL) && size < 0 {
 		return UploadInfo{}, errInvalidArgument("MD5Sum cannot be calculated with size '-1'")
 	}
+	if opts.Checksum.IsSet() {
+		opts.SendContentMd5 = false
+	}

 	var readSeeker io.Seeker
 	if size > 0 {
@@ -746,17 +759,6 @@ func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string,
 	// Set headers.
 	customHeader := opts.Header()

-	// Add CRC when client supports it, MD5 is not set, not Google and we don't add SHA256 to chunks.
-	addCrc := c.trailingHeaderSupport && md5Base64 == "" && !s3utils.IsGoogleEndpoint(*c.endpointURL) && (opts.DisableContentSha256 || c.secure)
-
-	if addCrc {
-		// If user has added checksums, don't add them ourselves.
-		for k := range opts.UserMetadata {
-			if strings.HasPrefix(strings.ToLower(k), "x-amz-checksum-") {
-				addCrc = false
-			}
-		}
-	}
 	// Populate request metadata.
 	reqMetadata := requestMetadata{
 		bucketName: bucketName,
@@ -768,10 +770,23 @@ func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string,
 		contentSHA256Hex: sha256Hex,
 		streamSha256:     !opts.DisableContentSha256,
 	}
-	if addCrc {
-		opts.AutoChecksum.SetDefault(ChecksumCRC32C)
-		reqMetadata.addCrc = &opts.AutoChecksum
+
+	// Add CRC when client supports it, MD5 is not set, not Google and we don't add SHA256 to chunks.
+	addCrc := c.trailingHeaderSupport && md5Base64 == "" && !s3utils.IsGoogleEndpoint(*c.endpointURL) && (opts.DisableContentSha256 || c.secure)
+	if opts.Checksum.IsSet() {
+		reqMetadata.addCrc = &opts.Checksum
+	} else if addCrc {
+		// If user has added checksums, don't add them ourselves.
+		for k := range opts.UserMetadata {
+			if strings.HasPrefix(strings.ToLower(k), "x-amz-checksum-") {
+				addCrc = false
+			}
+		}
+		if addCrc {
+			opts.AutoChecksum.SetDefault(ChecksumCRC32C)
+			reqMetadata.addCrc = &opts.AutoChecksum
+		}
 	}

-	if opts.Internal.SourceVersionID != "" {
+	if opts.Internal.SourceVersionID != nullVersionID {
 		if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil {
@@ -94,6 +94,13 @@ type PutObjectOptions struct {
 	// If none is specified CRC32C is used, since it is generally the fastest.
 	AutoChecksum ChecksumType

+	// Checksum will force a checksum of the specific type.
+	// This requires that the client was created with "TrailingHeaders:true" option,
+	// and that the destination server supports it.
+	// Unavailable with V2 signatures & Google endpoints.
+	// This will disable content MD5 checksums if set.
+	Checksum ChecksumType
+
 	// ConcurrentStreamParts will create NumThreads buffers of PartSize bytes,
 	// fill them serially and upload them in parallel.
 	// This can be used for faster uploads on non-seekable or slow-to-seek input.
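A rough usage sketch for the new Checksum option documented above (not code from this repository; endpoint, bucket and credentials are placeholders, and per the comment the client must be built with TrailingHeaders enabled):

package main

import (
	"bytes"
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// TrailingHeaders must be enabled for a forced Checksum to be sent.
	client, err := minio.New("s3.example.org", &minio.Options{
		Creds:           credentials.NewStaticV4("ACCESS_KEY", "SECRET_KEY", ""),
		Secure:          true,
		TrailingHeaders: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	data := []byte("hello world")
	info, err := client.PutObject(context.Background(), "my-bucket", "hello.txt",
		bytes.NewReader(data), int64(len(data)), minio.PutObjectOptions{
			ContentType: "text/plain",
			Checksum:    minio.ChecksumSHA256, // force a SHA-256 trailing checksum
		})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("uploaded, sha256 checksum:", info.ChecksumSHA256)
}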
@@ -240,7 +247,7 @@ func (opts PutObjectOptions) Header() (header http.Header) {
 }

 // validate() checks if the UserMetadata map has standard headers or and raises an error if so.
-func (opts PutObjectOptions) validate() (err error) {
+func (opts PutObjectOptions) validate(c *Client) (err error) {
 	for k, v := range opts.UserMetadata {
 		if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) || isMinioHeader(k) {
 			return errInvalidArgument(k + " unsupported user defined metadata name")
@@ -255,6 +262,17 @@ func (opts PutObjectOptions) validate() (err error) {
 	if opts.LegalHold != "" && !opts.LegalHold.IsValid() {
 		return errInvalidArgument(opts.LegalHold.String() + " unsupported legal-hold status")
 	}
+	if opts.Checksum.IsSet() {
+		switch {
+		case !c.trailingHeaderSupport:
+			return errInvalidArgument("Checksum requires Client with TrailingHeaders enabled")
+		case c.overrideSignerType.IsV2():
+			return errInvalidArgument("Checksum cannot be used with v2 signatures")
+		case s3utils.IsGoogleEndpoint(*c.endpointURL):
+			return errInvalidArgument("Checksum cannot be used with GCS endpoints")
+		}
+	}
+
 	return nil
 }
@@ -291,7 +309,7 @@ func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, r
 		return UploadInfo{}, errors.New("object size must be provided with disable multipart upload")
 	}

-	err = opts.validate()
+	err = opts.validate(c)
 	if err != nil {
 		return UploadInfo{}, err
 	}
@@ -333,7 +351,7 @@ func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName str
 		return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts)
 	}

-	if size < int64(partSize) || opts.DisableMultipart {
+	if size <= int64(partSize) || opts.DisableMultipart {
 		return c.putObject(ctx, bucketName, objectName, reader, size, opts)
 	}

@@ -362,6 +380,10 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
 		return UploadInfo{}, err
 	}

+	if opts.Checksum.IsSet() {
+		opts.SendContentMd5 = false
+		opts.AutoChecksum = opts.Checksum
+	}
 	if !opts.SendContentMd5 {
 		if opts.UserMetadata == nil {
 			opts.UserMetadata = make(map[string]string, 1)
@@ -107,7 +107,7 @@ type readSeekCloser interface {
 // Total size should be < 5TB.
 // This function blocks until 'objs' is closed and the content has been uploaded.
 func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts SnowballOptions, objs <-chan SnowballObject) (err error) {
-	err = opts.Opts.validate()
+	err = opts.Opts.validate(&c)
 	if err != nil {
 		return err
 	}
@@ -128,7 +128,7 @@ type Options struct {
 // Global constants.
 const (
 	libraryName    = "minio-go"
-	libraryVersion = "v7.0.76"
+	libraryVersion = "v7.0.77"
 )

 // User Agent should always following the below style.
@@ -661,7 +661,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
 		// Initiate the request.
 		res, err = c.do(req)
 		if err != nil {
-			if isRequestErrorRetryable(err) {
+			if isRequestErrorRetryable(ctx, err) {
 				// Retry the request
 				continue
 			}
@@ -83,7 +83,7 @@ func createHTTPTransport() (transport *http.Transport) {
 		return nil
 	}

-	if mustParseBool(os.Getenv(skipCERTValidation)) {
+	if mustParseBool(os.Getenv(enableHTTPS)) && mustParseBool(os.Getenv(skipCERTValidation)) {
 		transport.TLSClientConfig.InsecureSkipVerify = true
 	}

@@ -2334,7 +2334,7 @@ func testPutObjectWithChecksums() {
 }

 // Test PutObject with custom checksums.
-func testPutMultipartObjectWithChecksums() {
+func testPutObjectWithTrailingChecksums() {
 	// initialize logging params
 	startTime := time.Now()
 	testName := getFuncName()
@@ -2342,7 +2342,7 @@ func testPutMultipartObjectWithChecksums() {
 	args := map[string]interface{}{
 		"bucketName": "",
 		"objectName": "",
-		"opts":       "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
+		"opts":       "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress, TrailChecksum: xxx}",
 	}

 	if !isFullMode() {
@@ -2356,9 +2356,201 @@ func testPutMultipartObjectWithChecksums() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Transport: createHTTPTransport(),
-			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:           credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport:       createHTTPTransport(),
+			Secure:          mustParseBool(os.Getenv(enableHTTPS)),
+			TrailingHeaders: true,
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Make bucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+	tests := []struct {
+		cs minio.ChecksumType
+	}{
+		{cs: minio.ChecksumCRC32C},
+		{cs: minio.ChecksumCRC32},
+		{cs: minio.ChecksumSHA1},
+		{cs: minio.ChecksumSHA256},
+	}
+
+	for _, test := range tests {
+		function := "PutObject(bucketName, objectName, reader,size, opts)"
+		bufSize := dataFileMap["datafile-10-kB"]
+
+		// Save the data
+		objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+		args["objectName"] = objectName
+
+		cmpChecksum := func(got, want string) {
+			if want != got {
+				logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
+				return
+			}
+		}
+
+		meta := map[string]string{}
+		reader := getDataReader("datafile-10-kB")
+		b, err := io.ReadAll(reader)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "Read failed", err)
+			return
+		}
+		h := test.cs.Hasher()
+		h.Reset()
+
+		// Test with Wrong CRC.
+		args["metadata"] = meta
+		args["range"] = "false"
+		args["checksum"] = test.cs.String()
+
+		resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
+			DisableMultipart:     true,
+			DisableContentSha256: true,
+			UserMetadata:         meta,
+			Checksum:             test.cs,
+		})
+		if err != nil {
+			logError(testName, function, args, startTime, "", "PutObject failed", err)
+			return
+		}
+
+		h.Write(b)
+		meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil))
+
+		cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"])
+		cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"])
+		cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"])
+		cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
+
+		// Read the data back
+		gopts := minio.GetObjectOptions{Checksum: true}
+
+		function = "GetObject(...)"
+		r, err := c.GetObject(context.Background(), bucketName, objectName, gopts)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "GetObject failed", err)
+			return
+		}
+
+		st, err := r.Stat()
+		if err != nil {
+			logError(testName, function, args, startTime, "", "Stat failed", err)
+			return
+		}
+		cmpChecksum(st.ChecksumSHA256, meta["x-amz-checksum-sha256"])
+		cmpChecksum(st.ChecksumSHA1, meta["x-amz-checksum-sha1"])
+		cmpChecksum(st.ChecksumCRC32, meta["x-amz-checksum-crc32"])
+		cmpChecksum(st.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
+
+		if st.Size != int64(bufSize) {
+			logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err)
+			return
+		}
+
+		if err := r.Close(); err != nil {
+			logError(testName, function, args, startTime, "", "Object Close failed", err)
+			return
+		}
+		if err := r.Close(); err == nil {
+			logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err)
+			return
+		}
+
+		function = "GetObject( Range...)"
+		args["range"] = "true"
+		err = gopts.SetRange(100, 1000)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "SetRange failed", err)
+			return
+		}
+		r, err = c.GetObject(context.Background(), bucketName, objectName, gopts)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "GetObject failed", err)
+			return
+		}
+
+		b, err = io.ReadAll(r)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "Read failed", err)
+			return
+		}
+		st, err = r.Stat()
+		if err != nil {
+			logError(testName, function, args, startTime, "", "Stat failed", err)
+			return
+		}
+
+		// Range requests should return empty checksums...
+		cmpChecksum(st.ChecksumSHA256, "")
+		cmpChecksum(st.ChecksumSHA1, "")
+		cmpChecksum(st.ChecksumCRC32, "")
+		cmpChecksum(st.ChecksumCRC32C, "")
+
+		function = "GetObjectAttributes(...)"
+		s, err := c.GetObjectAttributes(context.Background(), bucketName, objectName, minio.ObjectAttributesOptions{})
+		if err != nil {
+			logError(testName, function, args, startTime, "", "GetObjectAttributes failed", err)
+			return
+		}
+		cmpChecksum(s.Checksum.ChecksumSHA256, meta["x-amz-checksum-sha256"])
+		cmpChecksum(s.Checksum.ChecksumSHA1, meta["x-amz-checksum-sha1"])
+		cmpChecksum(s.Checksum.ChecksumCRC32, meta["x-amz-checksum-crc32"])
+		cmpChecksum(s.Checksum.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
+
+		delete(args, "range")
+		delete(args, "metadata")
+	}
+
+	logSuccess(testName, function, args, startTime)
+}
+
+// Test PutObject with custom checksums.
+func testPutMultipartObjectWithChecksums(trailing bool) {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "PutObject(bucketName, objectName, reader,size, opts)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+		"opts":       fmt.Sprintf("minio.PutObjectOptions{UserMetadata: metadata, Progress: progress Checksum: %v}", trailing),
+	}
+
+	if !isFullMode() {
+		logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs")
+		return
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:           credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport:       createHTTPTransport(),
+			Secure:          mustParseBool(os.Getenv(enableHTTPS)),
+			TrailingHeaders: trailing,
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -2445,14 +2637,20 @@ func testPutMultipartObjectWithChecksums() {
 		h.Reset()
 		want := hashMultiPart(b, partSize, test.cs.Hasher())

+		var cs minio.ChecksumType
+		rd := io.Reader(io.NopCloser(bytes.NewReader(b)))
+		if trailing {
+			cs = test.cs
+			rd = bytes.NewReader(b)
+		}
 		// Set correct CRC.

-		resp, err := c.PutObject(context.Background(), bucketName, objectName, io.NopCloser(bytes.NewReader(b)), int64(bufSize), minio.PutObjectOptions{
+		resp, err := c.PutObject(context.Background(), bucketName, objectName, rd, int64(bufSize), minio.PutObjectOptions{
 			DisableContentSha256: true,
 			DisableMultipart:     false,
 			UserMetadata:         nil,
 			PartSize:             partSize,
 			AutoChecksum:         test.cs,
+			Checksum:             cs,
 		})
 		if err != nil {
 			logError(testName, function, args, startTime, "", "PutObject failed", err)
@@ -2982,6 +3180,7 @@ func testGetObjectAttributes() {
 		testFiles[i].UploadInfo, err = c.PutObject(context.Background(), v.Bucket, v.Object, reader, int64(bufSize), minio.PutObjectOptions{
 			ContentType:    v.ContentType,
 			SendContentMd5: v.SendContentMd5,
+			Checksum:       minio.ChecksumCRC32C,
 		})
 		if err != nil {
 			logError(testName, function, args, startTime, "", "PutObject failed", err)
@@ -3063,7 +3262,7 @@ func testGetObjectAttributes() {
 		test: objectAttributesTestOptions{
 			TestFileName:    "file1",
 			StorageClass:    "STANDARD",
-			HasFullChecksum: false,
+			HasFullChecksum: true,
 		},
 	}

@@ -3152,9 +3351,10 @@ func testGetObjectAttributesSSECEncryption() {

 	info, err := c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{
 		ContentType:          "content/custom",
-		SendContentMd5:       true,
+		SendContentMd5:       false,
 		ServerSideEncryption: sse,
 		PartSize:             uint64(bufSize) / 2,
+		Checksum:             minio.ChecksumCRC32C,
 	})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "PutObject failed", err)
@@ -3174,9 +3374,9 @@ func testGetObjectAttributesSSECEncryption() {
 		ETag:             info.ETag,
 		NumberOfParts:    2,
 		ObjectSize:       int(info.Size),
-		HasFullChecksum:  false,
+		HasFullChecksum:  true,
 		HasParts:         true,
-		HasPartChecksums: false,
+		HasPartChecksums: true,
 	})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "Validating GetObjectsAttributes response failed", err)
@@ -5594,18 +5794,12 @@ func testPresignedPostPolicy() {
 	}
 	writer.Close()

-	transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS)))
-	if err != nil {
-		logError(testName, function, args, startTime, "", "DefaultTransport failed", err)
-		return
-	}
-
 	httpClient := &http.Client{
 		// Setting a sensible time out of 30secs to wait for response
 		// headers. Request is pro-actively canceled after 30secs
 		// with no response.
 		Timeout:   30 * time.Second,
-		Transport: transport,
+		Transport: createHTTPTransport(),
 	}
 	args["url"] = presignedPostPolicyURL.String()

@@ -7519,7 +7713,7 @@ func testFunctional() {
 		return
 	}

-	transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS)))
+	transport := createHTTPTransport()
 	if err != nil {
 		logError(testName, function, args, startTime, "", "DefaultTransport failed", err)
 		return
@@ -12450,18 +12644,12 @@ func testFunctionalV2() {
 		return
 	}

-	transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS)))
-	if err != nil {
-		logError(testName, function, args, startTime, "", "DefaultTransport failed", err)
-		return
-	}
-
 	httpClient := &http.Client{
 		// Setting a sensible time out of 30secs to wait for response
 		// headers. Request is pro-actively canceled after 30secs
 		// with no response.
 		Timeout:   30 * time.Second,
-		Transport: transport,
+		Transport: createHTTPTransport(),
 	}

 	req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil)
@@ -13556,14 +13744,9 @@ func testCors() {
 	bucketURL := c.EndpointURL().String() + "/" + bucketName + "/"
 	objectURL := bucketURL + objectName

-	transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS)))
-	if err != nil {
-		logError(testName, function, args, startTime, "", "DefaultTransport failed", err)
-		return
-	}
 	httpClient := &http.Client{
 		Timeout:   30 * time.Second,
-		Transport: transport,
+		Transport: createHTTPTransport(),
 	}

 	errStrAccessForbidden := `<Error><Code>AccessForbidden</Code><Message>CORSResponse: This CORS request is not allowed. This is usually because the evalution of Origin, request method / Access-Control-Request-Method or Access-Control-Request-Headers are not whitelisted`
@@ -14757,7 +14940,9 @@ func main() {
 	testCompose10KSourcesV2()
 	testUserMetadataCopyingV2()
 	testPutObjectWithChecksums()
-	testPutMultipartObjectWithChecksums()
+	testPutObjectWithTrailingChecksums()
+	testPutMultipartObjectWithChecksums(false)
+	testPutMultipartObjectWithChecksums(true)
 	testPutObject0ByteV2()
 	testPutObjectNoLengthV2()
 	testPutObjectsUnknownV2()
@@ -301,6 +301,25 @@ func (p *PostPolicy) SetUserMetadata(key, value string) error {
 	return nil
 }

+// SetUserMetadataStartsWith - Set how an user metadata should starts with.
+// Can be retrieved through a HEAD request or an event.
+func (p *PostPolicy) SetUserMetadataStartsWith(key, value string) error {
+	if strings.TrimSpace(key) == "" || key == "" {
+		return errInvalidArgument("Key is empty")
+	}
+	headerName := fmt.Sprintf("x-amz-meta-%s", key)
+	policyCond := policyCondition{
+		matchType: "starts-with",
+		condition: fmt.Sprintf("$%s", headerName),
+		value:     value,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData[headerName] = value
+	return nil
+}
+
 // SetChecksum sets the checksum of the request.
 func (p *PostPolicy) SetChecksum(c Checksum) {
 	if c.IsSet() {
@@ -129,9 +129,10 @@ func isHTTPStatusRetryable(httpStatusCode int) (ok bool) {
 }

 // For now, all http Do() requests are retriable except some well defined errors
-func isRequestErrorRetryable(err error) bool {
+func isRequestErrorRetryable(ctx context.Context, err error) bool {
 	if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
-		return false
+		// Retry if internal timeout in the HTTP call.
+		return ctx.Err() == nil
 	}
 	if ue, ok := err.(*url.Error); ok {
 		e := ue.Unwrap()
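The retry change above keeps retrying when only a per-attempt timeout fired, but gives up once the caller's own context is done. A minimal sketch of that distinction (illustrative names, not minio-go internals):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retryable reports whether a failed attempt should be retried: a timed-out
// attempt is retried only while the parent context is still alive.
func retryable(parent context.Context, err error) bool {
	if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
		return parent.Err() == nil
	}
	return true
}

func main() {
	parent := context.Background()

	// Per-attempt timeout fired, but the parent is fine: retry.
	attempt, cancel := context.WithTimeout(parent, time.Millisecond)
	defer cancel()
	<-attempt.Done()
	fmt.Println(retryable(parent, attempt.Err())) // true

	// The caller cancelled the whole operation: do not retry.
	cancelled, cancelAll := context.WithCancel(parent)
	cancelAll()
	fmt.Println(retryable(cancelled, cancelled.Err())) // false
}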
@@ -65,7 +65,7 @@ codeberg.org/gruf/go-runners
 # codeberg.org/gruf/go-sched v1.2.3
 ## explicit; go 1.19
 codeberg.org/gruf/go-sched
-# codeberg.org/gruf/go-storage v0.1.2
+# codeberg.org/gruf/go-storage v0.2.0
 ## explicit; go 1.22
 codeberg.org/gruf/go-storage
 codeberg.org/gruf/go-storage/disk
@@ -491,7 +491,7 @@ github.com/miekg/dns
 # github.com/minio/md5-simd v1.1.2
 ## explicit; go 1.14
 github.com/minio/md5-simd
-# github.com/minio/minio-go/v7 v7.0.76
+# github.com/minio/minio-go/v7 v7.0.77
 ## explicit; go 1.21
 github.com/minio/minio-go/v7
 github.com/minio/minio-go/v7/pkg/cors