diff --git a/go.mod b/go.mod
index 2a0f9288a..322bd02ff 100644
--- a/go.mod
+++ b/go.mod
@@ -26,7 +26,7 @@ require (
github.com/gin-gonic/gin v1.9.0
github.com/go-fed/httpsig v1.1.0
github.com/go-playground/form/v4 v4.2.0
- github.com/go-playground/validator/v10 v10.13.0
+ github.com/go-playground/validator/v10 v10.14.0
github.com/google/uuid v1.3.0
github.com/gorilla/feeds v1.1.1
github.com/gorilla/websocket v1.5.0
@@ -99,6 +99,7 @@ require (
github.com/dsoprea/go-utility/v2 v2.0.0-20200717064901-2fccff4aa15e // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
+ github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-errors/errors v1.4.1 // indirect
github.com/go-jose/go-jose/v3 v3.0.0 // indirect
@@ -129,7 +130,7 @@ require (
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/klauspost/compress v1.16.3 // indirect
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
- github.com/leodido/go-urn v1.2.3 // indirect
+ github.com/leodido/go-urn v1.2.4 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mattn/go-isatty v0.0.18 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
diff --git a/go.sum b/go.sum
index 3e45a7985..f40867f23 100644
--- a/go.sum
+++ b/go.sum
@@ -188,6 +188,8 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/fxamacker/cbor v1.5.1 h1:XjQWBgdmQyqimslUh5r4tUGmoqzHmBFQOImkWGi2awg=
+github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
+github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gavv/httpexpect v2.0.0+incompatible h1:1X9kcRshkSKEjNJJxX9Y9mQ5BRfbxU5kORdjhlA1yX8=
github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -230,8 +232,8 @@ github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
-github.com/go-playground/validator/v10 v10.13.0 h1:cFRQdfaSMCOSfGCCLB20MHvuoHb/s5G8L5pu2ppK5AQ=
-github.com/go-playground/validator/v10 v10.13.0/go.mod h1:dwu7+CG8/CtBiJFZDz4e+5Upb6OLw04gtBYw0mcG/z4=
+github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js=
+github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-session/session v3.1.2+incompatible/go.mod h1:8B3iivBQjrz/JtC68Np2T1yBBLxTan3mn/3OM0CyRt0=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
@@ -427,8 +429,8 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
-github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA=
-github.com/leodido/go-urn v1.2.3/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
+github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
+github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
diff --git a/vendor/github.com/gabriel-vasile/mimetype/.gitattributes b/vendor/github.com/gabriel-vasile/mimetype/.gitattributes
new file mode 100644
index 000000000..0cc26ec01
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/.gitattributes
@@ -0,0 +1 @@
+testdata/* linguist-vendored
diff --git a/vendor/github.com/gabriel-vasile/mimetype/CODE_OF_CONDUCT.md b/vendor/github.com/gabriel-vasile/mimetype/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..8479cd87d
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/CODE_OF_CONDUCT.md
@@ -0,0 +1,76 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, sex characteristics, gender identity and expression,
+level of experience, education, socio-economic status, nationality, personal
+appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at vasile.gabriel@email.com. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see
+https://www.contributor-covenant.org/faq
diff --git a/vendor/github.com/gabriel-vasile/mimetype/CONTRIBUTING.md b/vendor/github.com/gabriel-vasile/mimetype/CONTRIBUTING.md
new file mode 100644
index 000000000..56ae4e57c
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/CONTRIBUTING.md
@@ -0,0 +1,12 @@
+## Contribute
+Contributions to **mimetype** are welcome. If you find an issue and you are
+considering contributing, you can use the [GitHub issue tracker](https://github.com/gabriel-vasile/mimetype/issues)
+to report it, or better yet, open a pull request.
+
+Code contributions must respect these rules:
+ - code must be test covered
+ - code must be formatted using gofmt tool
+ - exported names must be documented
+
+**Important**: By submitting a pull request, you agree to allow the project
+owner to license your work under the same license as that used by the project.
diff --git a/vendor/github.com/gabriel-vasile/mimetype/LICENSE b/vendor/github.com/gabriel-vasile/mimetype/LICENSE
new file mode 100644
index 000000000..6aac070c7
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018-2020 Gabriel Vasile
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/gabriel-vasile/mimetype/README.md b/vendor/github.com/gabriel-vasile/mimetype/README.md
new file mode 100644
index 000000000..d310928de
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/README.md
@@ -0,0 +1,108 @@
+# mimetype
+
+A package for detecting MIME types and extensions based on magic numbers
+
+## Features
+- fast and precise MIME type and file extension detection
+- long list of [supported MIME types](supported_mimes.md)
+- possibility to [extend](https://pkg.go.dev/github.com/gabriel-vasile/mimetype#example-package-Extend) with other file formats
+- common file formats are prioritized
+- [text vs. binary files differentiation](https://pkg.go.dev/github.com/gabriel-vasile/mimetype#example-package-TextVsBinary)
+- safe for concurrent usage
+
+## Install
+```bash
+go get github.com/gabriel-vasile/mimetype
+```
+
+## Usage
+```go
+mtype := mimetype.Detect([]byte)
+// OR
+mtype, err := mimetype.DetectReader(io.Reader)
+// OR
+mtype, err := mimetype.DetectFile("/path/to/file")
+fmt.Println(mtype.String(), mtype.Extension())
+```
+See the [runnable Go Playground examples](https://pkg.go.dev/github.com/gabriel-vasile/mimetype#pkg-overview).
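+
+One of those examples covers extending detection with new formats. Roughly,
+registering a custom matcher through the package-level `Extend` hook looks like
+the sketch below; the detector has the same `func(raw []byte, limit uint32) bool`
+shape as the built-in matchers, and the magic bytes used here are made up:
+```go
+mimetype.Extend(func(raw []byte, limit uint32) bool {
+	// Hypothetical signature for a custom format.
+	return bytes.HasPrefix(raw, []byte("CUSTOM\x00"))
+}, "application/x-custom", ".custom")
+```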
+
+## When to use mimetype
+Only use libraries like **mimetype** as a last resort. Content type detection
+using magic numbers is slow, inaccurate, and non-standard. Most of the time,
+protocols have methods for specifying such metadata; e.g., the `Content-Type`
+header in HTTP and SMTP.
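+
+For example, in an HTTP handler the declared type can be trusted first and the
+magic-number check kept only as a fallback (a rough sketch, where `r` is the
+incoming `*http.Request`):
+```go
+ctype := r.Header.Get("Content-Type")
+if ctype == "" {
+	// Only sniff the body when the client did not declare a type.
+	// Note: the bytes read for detection would need buffering before reuse.
+	if mtype, err := mimetype.DetectReader(r.Body); err == nil {
+		ctype = mtype.String()
+	}
+}
+```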
+
+## FAQ
+Q: My file is in the list of [supported MIME types](supported_mimes.md) but
+it is not correctly detected. What should I do?
+
+A: Some file formats (often Microsoft Office documents) keep their signatures
+towards the end of the file. Try increasing the number of bytes used for detection
+with:
+```go
+mimetype.SetLimit(1024*1024) // Set limit to 1MB.
+// or
+mimetype.SetLimit(0) // No limit, whole file content used.
+mimetype.DetectFile("file.doc")
+```
+If increasing the limit does not help, please
+[open an issue](https://github.com/gabriel-vasile/mimetype/issues/new?assignees=&labels=&template=mismatched-mime-type-detected.md&title=).
+
+## Structure
+**mimetype** uses a hierarchical structure to organize its MIME type detection logic.
+This reduces the number of calls needed for detecting the file type. The reason
+behind this choice is that there are file formats used as containers for other
+file formats. For example, Microsoft Office files are just zip archives,
+containing specific metadata files. Once a file has been identified as a
+zip, there is no need to check if it is a text file, but it is worth checking if
+it is a Microsoft Office file.
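+
+As an illustration of that hierarchy, the parent chain of a detected type can be
+walked with the `Parent` helper; a small sketch (for a .docx input the chain
+would be the Word type, then application/zip, then the application/octet-stream
+root):
+```go
+mtype, _ := mimetype.DetectFile("report.docx") // example path, error handling omitted
+for m := mtype; m != nil; m = m.Parent() {
+	fmt.Println(m.String())
+}
+```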
+
+To prevent loading entire files into memory, when detecting from a
+[reader](https://pkg.go.dev/github.com/gabriel-vasile/mimetype#DetectReader)
+or from a [file](https://pkg.go.dev/github.com/gabriel-vasile/mimetype#DetectFile),
+**mimetype** limits itself to reading only the header of the input.
+
+## Performance
+Thanks to the hierarchical structure, searching for common formats first,
+and limiting itself to file headers, **mimetype** matches the performance of
+stdlib `http.DetectContentType` while outperforming the alternative `filetype` package.
+
+```bash
+ mimetype http.DetectContentType filetype
+BenchmarkMatchTar-24 250 ns/op 400 ns/op 3778 ns/op
+BenchmarkMatchZip-24 524 ns/op 351 ns/op 4884 ns/op
+BenchmarkMatchJpeg-24 103 ns/op 228 ns/op 839 ns/op
+BenchmarkMatchGif-24 139 ns/op 202 ns/op 751 ns/op
+BenchmarkMatchPng-24 165 ns/op 221 ns/op 1176 ns/op
+```
+
+## Contributing
+See [CONTRIBUTING.md](CONTRIBUTING.md).
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/charset/charset.go b/vendor/github.com/gabriel-vasile/mimetype/internal/charset/charset.go
new file mode 100644
index 000000000..0647f730e
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/charset/charset.go
@@ -0,0 +1,309 @@
+package charset
+
+import (
+ "bytes"
+ "encoding/xml"
+ "strings"
+ "unicode/utf8"
+
+ "golang.org/x/net/html"
+)
+
+const (
+ F = 0 /* character never appears in text */
+ T = 1 /* character appears in plain ASCII text */
+ I = 2 /* character appears in ISO-8859 text */
+ X = 3 /* character appears in non-ISO extended ASCII (Mac, IBM PC) */
+)
+
+var (
+ boms = []struct {
+ bom []byte
+ enc string
+ }{
+ {[]byte{0xEF, 0xBB, 0xBF}, "utf-8"},
+ {[]byte{0x00, 0x00, 0xFE, 0xFF}, "utf-32be"},
+ {[]byte{0xFF, 0xFE, 0x00, 0x00}, "utf-32le"},
+ {[]byte{0xFE, 0xFF}, "utf-16be"},
+ {[]byte{0xFF, 0xFE}, "utf-16le"},
+ }
+
+ // https://github.com/file/file/blob/fa93fb9f7d21935f1c7644c47d2975d31f12b812/src/encoding.c#L241
+ textChars = [256]byte{
+ /* BEL BS HT LF VT FF CR */
+ F, F, F, F, F, F, F, T, T, T, T, T, T, T, F, F, /* 0x0X */
+ /* ESC */
+ F, F, F, F, F, F, F, F, F, F, F, T, F, F, F, F, /* 0x1X */
+ T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x2X */
+ T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x3X */
+ T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x4X */
+ T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x5X */
+ T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x6X */
+ T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, F, /* 0x7X */
+ /* NEL */
+ X, X, X, X, X, T, X, X, X, X, X, X, X, X, X, X, /* 0x8X */
+ X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, /* 0x9X */
+ I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xaX */
+ I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xbX */
+ I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xcX */
+ I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xdX */
+ I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xeX */
+ I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xfX */
+ }
+)
+
+// FromBOM returns the charset declared in the BOM of content.
+func FromBOM(content []byte) string {
+ for _, b := range boms {
+ if bytes.HasPrefix(content, b.bom) {
+ return b.enc
+ }
+ }
+ return ""
+}
+
+// FromPlain returns the charset of plain text. It relies on BOM presence
+// and falls back on checking each byte in content.
+func FromPlain(content []byte) string {
+ if len(content) == 0 {
+ return ""
+ }
+ if cset := FromBOM(content); cset != "" {
+ return cset
+ }
+ origContent := content
+ // Try to detect UTF-8.
+ // First eliminate any partial rune at the end.
+ for i := len(content) - 1; i >= 0 && i > len(content)-4; i-- {
+ b := content[i]
+ if b < 0x80 {
+ break
+ }
+ if utf8.RuneStart(b) {
+ content = content[:i]
+ break
+ }
+ }
+ hasHighBit := false
+ for _, c := range content {
+ if c >= 0x80 {
+ hasHighBit = true
+ break
+ }
+ }
+ if hasHighBit && utf8.Valid(content) {
+ return "utf-8"
+ }
+
+ // ASCII is a subset of UTF8. Follow W3C recommendation and replace with UTF8.
+ if ascii(origContent) {
+ return "utf-8"
+ }
+
+ return latin(origContent)
+}
+
+func latin(content []byte) string {
+ hasControlBytes := false
+ for _, b := range content {
+ t := textChars[b]
+ if t != T && t != I {
+ return ""
+ }
+ if b >= 0x80 && b <= 0x9F {
+ hasControlBytes = true
+ }
+ }
+ // Code range 0x80 to 0x9F is reserved for control characters in ISO-8859-1
+ // (so-called C1 Controls). Windows 1252, however, has printable punctuation
+ // characters in this range.
+ if hasControlBytes {
+ return "windows-1252"
+ }
+ return "iso-8859-1"
+}
+
+func ascii(content []byte) bool {
+ for _, b := range content {
+ if textChars[b] != T {
+ return false
+ }
+ }
+ return true
+}
+
+// FromXML returns the charset of an XML document. It relies on the XML
+// header and falls back on the plain
+// text content.
+func FromXML(content []byte) string {
+ if cset := fromXML(content); cset != "" {
+ return cset
+ }
+ return FromPlain(content)
+}
+func fromXML(content []byte) string {
+ content = trimLWS(content)
+ dec := xml.NewDecoder(bytes.NewReader(content))
+ rawT, err := dec.RawToken()
+ if err != nil {
+ return ""
+ }
+
+ t, ok := rawT.(xml.ProcInst)
+ if !ok {
+ return ""
+ }
+
+ return strings.ToLower(xmlEncoding(string(t.Inst)))
+}
+
+// FromHTML returns the charset of an HTML document. It first looks if a BOM is
+// present and if so uses it to determine the charset. If no BOM is present,
+// it relies on the meta tag and falls back on the
+// plain text content.
+func FromHTML(content []byte) string {
+ if cset := FromBOM(content); cset != "" {
+ return cset
+ }
+ if cset := fromHTML(content); cset != "" {
+ return cset
+ }
+ return FromPlain(content)
+}
+
+func fromHTML(content []byte) string {
+ z := html.NewTokenizer(bytes.NewReader(content))
+ for {
+ switch z.Next() {
+ case html.ErrorToken:
+ return ""
+
+ case html.StartTagToken, html.SelfClosingTagToken:
+ tagName, hasAttr := z.TagName()
+ if !bytes.Equal(tagName, []byte("meta")) {
+ continue
+ }
+ attrList := make(map[string]bool)
+ gotPragma := false
+
+ const (
+ dontKnow = iota
+ doNeedPragma
+ doNotNeedPragma
+ )
+ needPragma := dontKnow
+
+ name := ""
+ for hasAttr {
+ var key, val []byte
+ key, val, hasAttr = z.TagAttr()
+ ks := string(key)
+ if attrList[ks] {
+ continue
+ }
+ attrList[ks] = true
+ for i, c := range val {
+ if 'A' <= c && c <= 'Z' {
+ val[i] = c + 0x20
+ }
+ }
+
+ switch ks {
+ case "http-equiv":
+ if bytes.Equal(val, []byte("content-type")) {
+ gotPragma = true
+ }
+
+ case "content":
+ name = fromMetaElement(string(val))
+ if name != "" {
+ needPragma = doNeedPragma
+ }
+
+ case "charset":
+ name = string(val)
+ needPragma = doNotNeedPragma
+ }
+ }
+
+ if needPragma == dontKnow || needPragma == doNeedPragma && !gotPragma {
+ continue
+ }
+
+ if strings.HasPrefix(name, "utf-16") {
+ name = "utf-8"
+ }
+
+ return name
+ }
+ }
+}
+
+func fromMetaElement(s string) string {
+ for s != "" {
+ csLoc := strings.Index(s, "charset")
+ if csLoc == -1 {
+ return ""
+ }
+ s = s[csLoc+len("charset"):]
+ s = strings.TrimLeft(s, " \t\n\f\r")
+ if !strings.HasPrefix(s, "=") {
+ continue
+ }
+ s = s[1:]
+ s = strings.TrimLeft(s, " \t\n\f\r")
+ if s == "" {
+ return ""
+ }
+ if q := s[0]; q == '"' || q == '\'' {
+ s = s[1:]
+ closeQuote := strings.IndexRune(s, rune(q))
+ if closeQuote == -1 {
+ return ""
+ }
+ return s[:closeQuote]
+ }
+
+ end := strings.IndexAny(s, "; \t\n\f\r")
+ if end == -1 {
+ end = len(s)
+ }
+ return s[:end]
+ }
+ return ""
+}
+
+func xmlEncoding(s string) string {
+ param := "encoding="
+ idx := strings.Index(s, param)
+ if idx == -1 {
+ return ""
+ }
+ v := s[idx+len(param):]
+ if v == "" {
+ return ""
+ }
+ if v[0] != '\'' && v[0] != '"' {
+ return ""
+ }
+ idx = strings.IndexRune(v[1:], rune(v[0]))
+ if idx == -1 {
+ return ""
+ }
+ return v[1 : idx+1]
+}
+
+// trimLWS trims whitespace from beginning of the input.
+// TODO: find a way to call trimLWS once per detection instead of once in each
+// detector which needs the trimmed input.
+func trimLWS(in []byte) []byte {
+ firstNonWS := 0
+ for ; firstNonWS < len(in) && isWS(in[firstNonWS]); firstNonWS++ {
+ }
+
+ return in[firstNonWS:]
+}
+
+func isWS(b byte) bool {
+ return b == '\t' || b == '\n' || b == '\x0c' || b == '\r' || b == ' '
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/json/json.go b/vendor/github.com/gabriel-vasile/mimetype/internal/json/json.go
new file mode 100644
index 000000000..ee39349ae
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/json/json.go
@@ -0,0 +1,544 @@
+// Copyright (c) 2009 The Go Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package json provides a JSON value parser state machine.
+// This package is almost entirely copied from the Go stdlib.
+// Changes made to it permit users of the package to tell
+// if some slice of bytes is a valid beginning of a json string.
+package json
+
+import (
+ "fmt"
+)
+
+type (
+ scanStatus int
+)
+
+const (
+ parseObjectKey = iota // parsing object key (before colon)
+ parseObjectValue // parsing object value (after colon)
+ parseArrayValue // parsing array value
+
+ scanContinue scanStatus = iota // uninteresting byte
+ scanBeginLiteral // end implied by next result != scanContinue
+ scanBeginObject // begin object
+ scanObjectKey // just finished object key (string)
+ scanObjectValue // just finished non-last object value
+ scanEndObject // end object (implies scanObjectValue if possible)
+ scanBeginArray // begin array
+ scanArrayValue // just finished array value
+ scanEndArray // end array (implies scanArrayValue if possible)
+ scanSkipSpace // space byte; can skip; known to be last "continue" result
+ scanEnd // top-level value ended *before* this byte; known to be first "stop" result
+ scanError // hit an error, scanner.err.
+
+ // This limits the max nesting depth to prevent stack overflow.
+ // This is permitted by https://tools.ietf.org/html/rfc7159#section-9
+ maxNestingDepth = 10000
+)
+
+type (
+ scanner struct {
+ step func(*scanner, byte) scanStatus
+ parseState []int
+ endTop bool
+ err error
+ index int
+ }
+)
+
+// Scan returns the number of bytes scanned and if there was any error
+// in trying to reach the end of data.
+func Scan(data []byte) (int, error) {
+ s := &scanner{}
+ _ = checkValid(data, s)
+ return s.index, s.err
+}
+
+// checkValid verifies that data is valid JSON-encoded data.
+// scan is passed in for use by checkValid to avoid an allocation.
+func checkValid(data []byte, scan *scanner) error {
+ scan.reset()
+ for _, c := range data {
+ scan.index++
+ if scan.step(scan, c) == scanError {
+ return scan.err
+ }
+ }
+ if scan.eof() == scanError {
+ return scan.err
+ }
+ return nil
+}
+
+func isSpace(c byte) bool {
+ return c == ' ' || c == '\t' || c == '\r' || c == '\n'
+}
+
+func (s *scanner) reset() {
+ s.step = stateBeginValue
+ s.parseState = s.parseState[0:0]
+ s.err = nil
+}
+
+// eof tells the scanner that the end of input has been reached.
+// It returns a scan status just as s.step does.
+func (s *scanner) eof() scanStatus {
+ if s.err != nil {
+ return scanError
+ }
+ if s.endTop {
+ return scanEnd
+ }
+ s.step(s, ' ')
+ if s.endTop {
+ return scanEnd
+ }
+ if s.err == nil {
+ s.err = fmt.Errorf("unexpected end of JSON input")
+ }
+ return scanError
+}
+
+// pushParseState pushes a new parse state p onto the parse stack.
+// an error state is returned if maxNestingDepth was exceeded, otherwise successState is returned.
+func (s *scanner) pushParseState(c byte, newParseState int, successState scanStatus) scanStatus {
+ s.parseState = append(s.parseState, newParseState)
+ if len(s.parseState) <= maxNestingDepth {
+ return successState
+ }
+ return s.error(c, "exceeded max depth")
+}
+
+// popParseState pops a parse state (already obtained) off the stack
+// and updates s.step accordingly.
+func (s *scanner) popParseState() {
+ n := len(s.parseState) - 1
+ s.parseState = s.parseState[0:n]
+ if n == 0 {
+ s.step = stateEndTop
+ s.endTop = true
+ } else {
+ s.step = stateEndValue
+ }
+}
+
+// stateBeginValueOrEmpty is the state after reading `[`.
+func stateBeginValueOrEmpty(s *scanner, c byte) scanStatus {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == ']' {
+ return stateEndValue(s, c)
+ }
+ return stateBeginValue(s, c)
+}
+
+// stateBeginValue is the state at the beginning of the input.
+func stateBeginValue(s *scanner, c byte) scanStatus {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ switch c {
+ case '{':
+ s.step = stateBeginStringOrEmpty
+ return s.pushParseState(c, parseObjectKey, scanBeginObject)
+ case '[':
+ s.step = stateBeginValueOrEmpty
+ return s.pushParseState(c, parseArrayValue, scanBeginArray)
+ case '"':
+ s.step = stateInString
+ return scanBeginLiteral
+ case '-':
+ s.step = stateNeg
+ return scanBeginLiteral
+ case '0': // beginning of 0.123
+ s.step = state0
+ return scanBeginLiteral
+ case 't': // beginning of true
+ s.step = stateT
+ return scanBeginLiteral
+ case 'f': // beginning of false
+ s.step = stateF
+ return scanBeginLiteral
+ case 'n': // beginning of null
+ s.step = stateN
+ return scanBeginLiteral
+ }
+ if '1' <= c && c <= '9' { // beginning of 1234.5
+ s.step = state1
+ return scanBeginLiteral
+ }
+ return s.error(c, "looking for beginning of value")
+}
+
+// stateBeginStringOrEmpty is the state after reading `{`.
+func stateBeginStringOrEmpty(s *scanner, c byte) scanStatus {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == '}' {
+ n := len(s.parseState)
+ s.parseState[n-1] = parseObjectValue
+ return stateEndValue(s, c)
+ }
+ return stateBeginString(s, c)
+}
+
+// stateBeginString is the state after reading `{"key": value,`.
+func stateBeginString(s *scanner, c byte) scanStatus {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == '"' {
+ s.step = stateInString
+ return scanBeginLiteral
+ }
+ return s.error(c, "looking for beginning of object key string")
+}
+
+// stateEndValue is the state after completing a value,
+// such as after reading `{}` or `true` or `["x"`.
+func stateEndValue(s *scanner, c byte) scanStatus {
+ n := len(s.parseState)
+ if n == 0 {
+ // Completed top-level before the current byte.
+ s.step = stateEndTop
+ s.endTop = true
+ return stateEndTop(s, c)
+ }
+ if c <= ' ' && isSpace(c) {
+ s.step = stateEndValue
+ return scanSkipSpace
+ }
+ ps := s.parseState[n-1]
+ switch ps {
+ case parseObjectKey:
+ if c == ':' {
+ s.parseState[n-1] = parseObjectValue
+ s.step = stateBeginValue
+ return scanObjectKey
+ }
+ return s.error(c, "after object key")
+ case parseObjectValue:
+ if c == ',' {
+ s.parseState[n-1] = parseObjectKey
+ s.step = stateBeginString
+ return scanObjectValue
+ }
+ if c == '}' {
+ s.popParseState()
+ return scanEndObject
+ }
+ return s.error(c, "after object key:value pair")
+ case parseArrayValue:
+ if c == ',' {
+ s.step = stateBeginValue
+ return scanArrayValue
+ }
+ if c == ']' {
+ s.popParseState()
+ return scanEndArray
+ }
+ return s.error(c, "after array element")
+ }
+ return s.error(c, "")
+}
+
+// stateEndTop is the state after finishing the top-level value,
+// such as after reading `{}` or `[1,2,3]`.
+// Only space characters should be seen now.
+func stateEndTop(s *scanner, c byte) scanStatus {
+ if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
+ // Complain about non-space byte on next call.
+ s.error(c, "after top-level value")
+ }
+ return scanEnd
+}
+
+// stateInString is the state after reading `"`.
+func stateInString(s *scanner, c byte) scanStatus {
+ if c == '"' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ if c == '\\' {
+ s.step = stateInStringEsc
+ return scanContinue
+ }
+ if c < 0x20 {
+ return s.error(c, "in string literal")
+ }
+ return scanContinue
+}
+
+// stateInStringEsc is the state after reading `"\` during a quoted string.
+func stateInStringEsc(s *scanner, c byte) scanStatus {
+ switch c {
+ case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
+ s.step = stateInString
+ return scanContinue
+ case 'u':
+ s.step = stateInStringEscU
+ return scanContinue
+ }
+ return s.error(c, "in string escape code")
+}
+
+// stateInStringEscU is the state after reading `"\u` during a quoted string.
+func stateInStringEscU(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU1
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
+func stateInStringEscU1(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU12
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
+func stateInStringEscU12(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU123
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
+func stateInStringEscU123(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInString
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateNeg is the state after reading `-` during a number.
+func stateNeg(s *scanner, c byte) scanStatus {
+ if c == '0' {
+ s.step = state0
+ return scanContinue
+ }
+ if '1' <= c && c <= '9' {
+ s.step = state1
+ return scanContinue
+ }
+ return s.error(c, "in numeric literal")
+}
+
+// state1 is the state after reading a non-zero integer during a number,
+// such as after reading `1` or `100` but not `0`.
+func state1(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' {
+ s.step = state1
+ return scanContinue
+ }
+ return state0(s, c)
+}
+
+// state0 is the state after reading `0` during a number.
+func state0(s *scanner, c byte) scanStatus {
+ if c == '.' {
+ s.step = stateDot
+ return scanContinue
+ }
+ if c == 'e' || c == 'E' {
+ s.step = stateE
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateDot is the state after reading the integer and decimal point in a number,
+// such as after reading `1.`.
+func stateDot(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' {
+ s.step = stateDot0
+ return scanContinue
+ }
+ return s.error(c, "after decimal point in numeric literal")
+}
+
+// stateDot0 is the state after reading the integer, decimal point, and subsequent
+// digits of a number, such as after reading `3.14`.
+func stateDot0(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' {
+ return scanContinue
+ }
+ if c == 'e' || c == 'E' {
+ s.step = stateE
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateE is the state after reading the mantissa and e in a number,
+// such as after reading `314e` or `0.314e`.
+func stateE(s *scanner, c byte) scanStatus {
+ if c == '+' || c == '-' {
+ s.step = stateESign
+ return scanContinue
+ }
+ return stateESign(s, c)
+}
+
+// stateESign is the state after reading the mantissa, e, and sign in a number,
+// such as after reading `314e-` or `0.314e+`.
+func stateESign(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' {
+ s.step = stateE0
+ return scanContinue
+ }
+ return s.error(c, "in exponent of numeric literal")
+}
+
+// stateE0 is the state after reading the mantissa, e, optional sign,
+// and at least one digit of the exponent in a number,
+// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
+func stateE0(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' {
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateT is the state after reading `t`.
+func stateT(s *scanner, c byte) scanStatus {
+ if c == 'r' {
+ s.step = stateTr
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'r')")
+}
+
+// stateTr is the state after reading `tr`.
+func stateTr(s *scanner, c byte) scanStatus {
+ if c == 'u' {
+ s.step = stateTru
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'u')")
+}
+
+// stateTru is the state after reading `tru`.
+func stateTru(s *scanner, c byte) scanStatus {
+ if c == 'e' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'e')")
+}
+
+// stateF is the state after reading `f`.
+func stateF(s *scanner, c byte) scanStatus {
+ if c == 'a' {
+ s.step = stateFa
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'a')")
+}
+
+// stateFa is the state after reading `fa`.
+func stateFa(s *scanner, c byte) scanStatus {
+ if c == 'l' {
+ s.step = stateFal
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'l')")
+}
+
+// stateFal is the state after reading `fal`.
+func stateFal(s *scanner, c byte) scanStatus {
+ if c == 's' {
+ s.step = stateFals
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 's')")
+}
+
+// stateFals is the state after reading `fals`.
+func stateFals(s *scanner, c byte) scanStatus {
+ if c == 'e' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'e')")
+}
+
+// stateN is the state after reading `n`.
+func stateN(s *scanner, c byte) scanStatus {
+ if c == 'u' {
+ s.step = stateNu
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'u')")
+}
+
+// stateNu is the state after reading `nu`.
+func stateNu(s *scanner, c byte) scanStatus {
+ if c == 'l' {
+ s.step = stateNul
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateNul is the state after reading `nul`.
+func stateNul(s *scanner, c byte) scanStatus {
+ if c == 'l' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateError is the state after reaching a syntax error,
+// such as after reading `[1}` or `5.1.2`.
+func stateError(s *scanner, c byte) scanStatus {
+ return scanError
+}
+
+// error records an error and switches to the error state.
+func (s *scanner) error(c byte, context string) scanStatus {
+ s.step = stateError
+ s.err = fmt.Errorf("invalid character <<%c>> %s", c, context)
+ return scanError
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/archive.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/archive.go
new file mode 100644
index 000000000..fec11f080
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/archive.go
@@ -0,0 +1,124 @@
+package magic
+
+import (
+ "bytes"
+ "encoding/binary"
+)
+
+var (
+ // SevenZ matches a 7z archive.
+ SevenZ = prefix([]byte{0x37, 0x7A, 0xBC, 0xAF, 0x27, 0x1C})
+ // Gzip matches gzip files based on http://www.zlib.org/rfc-gzip.html#header-trailer.
+ Gzip = prefix([]byte{0x1f, 0x8b})
+	// Fits matches a Flexible Image Transport System file.
+ Fits = prefix([]byte{
+ 0x53, 0x49, 0x4D, 0x50, 0x4C, 0x45, 0x20, 0x20, 0x3D, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x54,
+ })
+ // Xar matches an eXtensible ARchive format file.
+ Xar = prefix([]byte{0x78, 0x61, 0x72, 0x21})
+ // Bz2 matches a bzip2 file.
+ Bz2 = prefix([]byte{0x42, 0x5A, 0x68})
+ // Ar matches an ar (Unix) archive file.
+ Ar = prefix([]byte{0x21, 0x3C, 0x61, 0x72, 0x63, 0x68, 0x3E})
+ // Deb matches a Debian package file.
+ Deb = offset([]byte{
+ 0x64, 0x65, 0x62, 0x69, 0x61, 0x6E, 0x2D,
+ 0x62, 0x69, 0x6E, 0x61, 0x72, 0x79,
+ }, 8)
+ // Warc matches a Web ARChive file.
+ Warc = prefix([]byte("WARC/1.0"), []byte("WARC/1.1"))
+ // Cab matches a Microsoft Cabinet archive file.
+ Cab = prefix([]byte("MSCF\x00\x00\x00\x00"))
+ // Xz matches an xz compressed stream based on https://tukaani.org/xz/xz-file-format.txt.
+ Xz = prefix([]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00})
+ // Lzip matches an Lzip compressed file.
+ Lzip = prefix([]byte{0x4c, 0x5a, 0x49, 0x50})
+ // RPM matches an RPM or Delta RPM package file.
+ RPM = prefix([]byte{0xed, 0xab, 0xee, 0xdb}, []byte("drpm"))
+ // Cpio matches a cpio archive file.
+ Cpio = prefix([]byte("070707"), []byte("070701"), []byte("070702"))
+ // RAR matches a RAR archive file.
+ RAR = prefix([]byte("Rar!\x1A\x07\x00"), []byte("Rar!\x1A\x07\x01\x00"))
+)
+
+// InstallShieldCab matches an InstallShield Cabinet archive file.
+func InstallShieldCab(raw []byte, _ uint32) bool {
+ return len(raw) > 7 &&
+ bytes.Equal(raw[0:4], []byte("ISc(")) &&
+ raw[6] == 0 &&
+ (raw[7] == 1 || raw[7] == 2 || raw[7] == 4)
+}
+
+// Zstd matches a Zstandard archive file.
+func Zstd(raw []byte, limit uint32) bool {
+ return len(raw) >= 4 &&
+ (0x22 <= raw[0] && raw[0] <= 0x28 || raw[0] == 0x1E) && // Different Zstandard versions.
+ bytes.HasPrefix(raw[1:], []byte{0xB5, 0x2F, 0xFD})
+}
+
+// CRX matches a Chrome extension file: a zip archive prepended by a package header.
+func CRX(raw []byte, limit uint32) bool {
+ const minHeaderLen = 16
+ if len(raw) < minHeaderLen || !bytes.HasPrefix(raw, []byte("Cr24")) {
+ return false
+ }
+ pubkeyLen := binary.LittleEndian.Uint32(raw[8:12])
+ sigLen := binary.LittleEndian.Uint32(raw[12:16])
+ zipOffset := minHeaderLen + pubkeyLen + sigLen
+ if uint32(len(raw)) < zipOffset {
+ return false
+ }
+ return Zip(raw[zipOffset:], limit)
+}
+
+// Tar matches a (t)ape (ar)chive file.
+func Tar(raw []byte, _ uint32) bool {
+	// The "magic" header field for files in UStar (POSIX IEEE P1003.1) archives
+ // has the prefix "ustar". The values of the remaining bytes in this field vary
+ // by archiver implementation.
+ if len(raw) >= 512 && bytes.HasPrefix(raw[257:], []byte{0x75, 0x73, 0x74, 0x61, 0x72}) {
+ return true
+ }
+
+ if len(raw) < 256 {
+ return false
+ }
+
+ // The older v7 format has no "magic" field, and therefore must be identified
+ // with heuristics based on legal ranges of values for other header fields:
+ // https://www.nationalarchives.gov.uk/PRONOM/Format/proFormatSearch.aspx?status=detailReport&id=385&strPageToDisplay=signatures
+ rules := []struct {
+ min, max uint8
+ i int
+ }{
+ {0x21, 0xEF, 0},
+ {0x30, 0x37, 105},
+ {0x20, 0x37, 106},
+ {0x00, 0x00, 107},
+ {0x30, 0x37, 113},
+ {0x20, 0x37, 114},
+ {0x00, 0x00, 115},
+ {0x30, 0x37, 121},
+ {0x20, 0x37, 122},
+ {0x00, 0x00, 123},
+ {0x30, 0x37, 134},
+ {0x30, 0x37, 146},
+ {0x30, 0x37, 153},
+ {0x00, 0x37, 154},
+ }
+ for _, r := range rules {
+ if raw[r.i] < r.min || raw[r.i] > r.max {
+ return false
+ }
+ }
+
+ for _, i := range []uint8{135, 147, 155} {
+ if raw[i] != 0x00 && raw[i] != 0x20 {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/audio.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/audio.go
new file mode 100644
index 000000000..d17e32482
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/audio.go
@@ -0,0 +1,76 @@
+package magic
+
+import (
+ "bytes"
+ "encoding/binary"
+)
+
+var (
+ // Flac matches a Free Lossless Audio Codec file.
+ Flac = prefix([]byte("\x66\x4C\x61\x43\x00\x00\x00\x22"))
+ // Midi matches a Musical Instrument Digital Interface file.
+ Midi = prefix([]byte("\x4D\x54\x68\x64"))
+ // Ape matches a Monkey's Audio file.
+ Ape = prefix([]byte("\x4D\x41\x43\x20\x96\x0F\x00\x00\x34\x00\x00\x00\x18\x00\x00\x00\x90\xE3"))
+ // MusePack matches a Musepack file.
+ MusePack = prefix([]byte("MPCK"))
+ // Au matches a Sun Microsystems au file.
+ Au = prefix([]byte("\x2E\x73\x6E\x64"))
+ // Amr matches an Adaptive Multi-Rate file.
+ Amr = prefix([]byte("\x23\x21\x41\x4D\x52"))
+ // Voc matches a Creative Voice file.
+ Voc = prefix([]byte("Creative Voice File"))
+ // M3u matches a Playlist file.
+ M3u = prefix([]byte("#EXTM3U"))
+ // AAC matches an Advanced Audio Coding file.
+ AAC = prefix([]byte{0xFF, 0xF1}, []byte{0xFF, 0xF9})
+)
+
+// Mp3 matches an mp3 file.
+func Mp3(raw []byte, limit uint32) bool {
+ if len(raw) < 3 {
+ return false
+ }
+
+ if bytes.HasPrefix(raw, []byte("ID3")) {
+ // MP3s with an ID3v2 tag will start with "ID3"
+		// ID3v1 tags, however, appear at the end of the file.
+ return true
+ }
+
+ // Match MP3 files without tags
+ switch binary.BigEndian.Uint16(raw[:2]) & 0xFFFE {
+ case 0xFFFA:
+ // MPEG ADTS, layer III, v1
+ return true
+ case 0xFFF2:
+ // MPEG ADTS, layer III, v2
+ return true
+ case 0xFFE2:
+ // MPEG ADTS, layer III, v2.5
+ return true
+ }
+
+ return false
+}
+
+// Wav matches a Waveform Audio File Format file.
+func Wav(raw []byte, limit uint32) bool {
+ return len(raw) > 12 &&
+ bytes.Equal(raw[:4], []byte("RIFF")) &&
+ bytes.Equal(raw[8:12], []byte{0x57, 0x41, 0x56, 0x45})
+}
+
+// Aiff matches an Audio Interchange File Format file.
+func Aiff(raw []byte, limit uint32) bool {
+ return len(raw) > 12 &&
+ bytes.Equal(raw[:4], []byte{0x46, 0x4F, 0x52, 0x4D}) &&
+ bytes.Equal(raw[8:12], []byte{0x41, 0x49, 0x46, 0x46})
+}
+
+// Qcp matches a Qualcomm Pure Voice file.
+func Qcp(raw []byte, limit uint32) bool {
+ return len(raw) > 12 &&
+ bytes.Equal(raw[:4], []byte("RIFF")) &&
+ bytes.Equal(raw[8:12], []byte("QLCM"))
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go
new file mode 100644
index 000000000..29bdded3e
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go
@@ -0,0 +1,196 @@
+package magic
+
+import (
+ "bytes"
+ "debug/macho"
+ "encoding/binary"
+)
+
+var (
+ // Lnk matches Microsoft lnk binary format.
+ Lnk = prefix([]byte{0x4C, 0x00, 0x00, 0x00, 0x01, 0x14, 0x02, 0x00})
+ // Wasm matches a web assembly File Format file.
+ Wasm = prefix([]byte{0x00, 0x61, 0x73, 0x6D})
+ // Exe matches a Windows/DOS executable file.
+ Exe = prefix([]byte{0x4D, 0x5A})
+ // Elf matches an Executable and Linkable Format file.
+ Elf = prefix([]byte{0x7F, 0x45, 0x4C, 0x46})
+	// Nes matches a Nintendo Entertainment System ROM file.
+ Nes = prefix([]byte{0x4E, 0x45, 0x53, 0x1A})
+ // SWF matches an Adobe Flash swf file.
+ SWF = prefix([]byte("CWS"), []byte("FWS"), []byte("ZWS"))
+ // Torrent has bencoded text in the beginning.
+ Torrent = prefix([]byte("d8:announce"))
+)
+
+// Java bytecode and Mach-O binaries share the same magic number.
+// More info here https://github.com/threatstack/libmagic/blob/master/magic/Magdir/cafebabe
+func classOrMachOFat(in []byte) bool {
+ // There should be at least 8 bytes for both of them because the only way to
+ // quickly distinguish them is by comparing byte at position 7
+ if len(in) < 8 {
+ return false
+ }
+
+ return bytes.HasPrefix(in, []byte{0xCA, 0xFE, 0xBA, 0xBE})
+}
+
+// Class matches a java class file.
+func Class(raw []byte, limit uint32) bool {
+ return classOrMachOFat(raw) && raw[7] > 30
+}
+
+// MachO matches Mach-O binaries format.
+func MachO(raw []byte, limit uint32) bool {
+ if classOrMachOFat(raw) && raw[7] < 20 {
+ return true
+ }
+
+ if len(raw) < 4 {
+ return false
+ }
+
+ be := binary.BigEndian.Uint32(raw)
+ le := binary.LittleEndian.Uint32(raw)
+
+ return be == macho.Magic32 ||
+ le == macho.Magic32 ||
+ be == macho.Magic64 ||
+ le == macho.Magic64
+}
+
+// Dbf matches a dBase file.
+// https://www.dbase.com/Knowledgebase/INT/db7_file_fmt.htm
+func Dbf(raw []byte, limit uint32) bool {
+ if len(raw) < 68 {
+ return false
+ }
+
+ // 3rd and 4th bytes contain the last update month and day of month.
+ if !(0 < raw[2] && raw[2] < 13 && 0 < raw[3] && raw[3] < 32) {
+ return false
+ }
+
+ // 12, 13, 30, 31 are reserved bytes and always filled with 0x00.
+ if raw[12] != 0x00 || raw[13] != 0x00 || raw[30] != 0x00 || raw[31] != 0x00 {
+ return false
+ }
+ // Production MDX flag;
+ // 0x01 if a production .MDX file exists for this table;
+ // 0x00 if no .MDX file exists.
+ if raw[28] > 0x01 {
+ return false
+ }
+
+ // dbf type is dictated by the first byte.
+ dbfTypes := []byte{
+ 0x02, 0x03, 0x04, 0x05, 0x30, 0x31, 0x32, 0x42, 0x62, 0x7B, 0x82,
+ 0x83, 0x87, 0x8A, 0x8B, 0x8E, 0xB3, 0xCB, 0xE5, 0xF5, 0xF4, 0xFB,
+ }
+ for _, b := range dbfTypes {
+ if raw[0] == b {
+ return true
+ }
+ }
+
+ return false
+}
+
+// ElfObj matches an object file.
+func ElfObj(raw []byte, limit uint32) bool {
+ return len(raw) > 17 && ((raw[16] == 0x01 && raw[17] == 0x00) ||
+ (raw[16] == 0x00 && raw[17] == 0x01))
+}
+
+// ElfExe matches an executable file.
+func ElfExe(raw []byte, limit uint32) bool {
+ return len(raw) > 17 && ((raw[16] == 0x02 && raw[17] == 0x00) ||
+ (raw[16] == 0x00 && raw[17] == 0x02))
+}
+
+// ElfLib matches a shared library file.
+func ElfLib(raw []byte, limit uint32) bool {
+ return len(raw) > 17 && ((raw[16] == 0x03 && raw[17] == 0x00) ||
+ (raw[16] == 0x00 && raw[17] == 0x03))
+}
+
+// ElfDump matches a core dump file.
+func ElfDump(raw []byte, limit uint32) bool {
+ return len(raw) > 17 && ((raw[16] == 0x04 && raw[17] == 0x00) ||
+ (raw[16] == 0x00 && raw[17] == 0x04))
+}
+
+// Dcm matches a DICOM medical format file.
+func Dcm(raw []byte, limit uint32) bool {
+ return len(raw) > 131 &&
+ bytes.Equal(raw[128:132], []byte{0x44, 0x49, 0x43, 0x4D})
+}
+
+// Marc matches a MARC21 (MAchine-Readable Cataloging) file.
+func Marc(raw []byte, limit uint32) bool {
+ // File is at least 24 bytes ("leader" field size).
+ if len(raw) < 24 {
+ return false
+ }
+
+ // Fixed bytes at offset 20.
+ if !bytes.Equal(raw[20:24], []byte("4500")) {
+ return false
+ }
+
+ // First 5 bytes are ASCII digits.
+ for i := 0; i < 5; i++ {
+ if raw[i] < '0' || raw[i] > '9' {
+ return false
+ }
+ }
+
+ // Field terminator is present in first 2048 bytes.
+ return bytes.Contains(raw[:min(2048, len(raw))], []byte{0x1E})
+}
+
+// Glb matches a glTF model format file.
+// GLB is the binary file format representation of 3D models saved in
+// the GL Transmission Format (glTF).
+// see more: https://docs.fileformat.com/3d/glb/
+// https://www.iana.org/assignments/media-types/model/gltf-binary
+// The GLB file format is little-endian and its header structure is
+// shown below:
+//
+// <-- 12-byte header -->
+// | magic | version | length |
+// | (uint32) | (uint32) | (uint32) |
+// | \x67\x6C\x54\x46 | \x01\x00\x00\x00 | ... |
+// | g l T F | 1 | ... |
+var Glb = prefix([]byte("\x67\x6C\x54\x46\x02\x00\x00\x00"),
+ []byte("\x67\x6C\x54\x46\x01\x00\x00\x00"))
+
+// TzIf matches a Time Zone Information Format (TZif) file.
+// See more: https://tools.ietf.org/id/draft-murchison-tzdist-tzif-00.html#rfc.section.3
+// Its header structure is shown below:
+// +---------------+---+
+// | magic (4) | <-+-- version (1)
+// +---------------+---+---------------------------------------+
+// | [unused - reserved for future use] (15) |
+// +---------------+---------------+---------------+-----------+
+// | isutccnt (4) | isstdcnt (4) | leapcnt (4) |
+// +---------------+---------------+---------------+
+// | timecnt (4) | typecnt (4) | charcnt (4) |
+func TzIf(raw []byte, limit uint32) bool {
+ // File is at least 44 bytes (header size).
+ if len(raw) < 44 {
+ return false
+ }
+
+ if !bytes.HasPrefix(raw, []byte("TZif")) {
+ return false
+ }
+
+ // Field "typecnt" MUST not be zero.
+ if binary.BigEndian.Uint32(raw[36:40]) == 0 {
+ return false
+ }
+
+ // Version has to be NUL (0x00), '2' (0x32) or '3' (0x33).
+ return raw[4] == 0x00 || raw[4] == 0x32 || raw[4] == 0x33
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/database.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/database.go
new file mode 100644
index 000000000..cb1fed12f
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/database.go
@@ -0,0 +1,13 @@
+package magic
+
+var (
+ // Sqlite matches an SQLite database file.
+ Sqlite = prefix([]byte{
+ 0x53, 0x51, 0x4c, 0x69, 0x74, 0x65, 0x20, 0x66,
+ 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x20, 0x33, 0x00,
+ })
+	// MsAccessAce matches a Microsoft Access database file.
+	MsAccessAce = offset([]byte("Standard ACE DB"), 4)
+	// MsAccessMdb matches a legacy Microsoft Access database file (JET, 2003 and earlier).
+ MsAccessMdb = offset([]byte("Standard Jet DB"), 4)
+)
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/document.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/document.go
new file mode 100644
index 000000000..b3b26d5a1
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/document.go
@@ -0,0 +1,62 @@
+package magic
+
+import "bytes"
+
+var (
+ // Pdf matches a Portable Document Format file.
+ // https://github.com/file/file/blob/11010cc805546a3e35597e67e1129a481aed40e8/magic/Magdir/pdf
+ Pdf = prefix(
+ // usual pdf signature
+ []byte("%PDF-"),
+ // new-line prefixed signature
+ []byte("\012%PDF-"),
+ // UTF-8 BOM prefixed signature
+ []byte("\xef\xbb\xbf%PDF-"),
+ )
+ // Fdf matches a Forms Data Format file.
+ Fdf = prefix([]byte("%FDF"))
+ // Mobi matches a Mobi file.
+ Mobi = offset([]byte("BOOKMOBI"), 60)
+ // Lit matches a Microsoft Lit file.
+ Lit = prefix([]byte("ITOLITLS"))
+)
+
+// DjVu matches a DjVu file.
+func DjVu(raw []byte, limit uint32) bool {
+ if len(raw) < 12 {
+ return false
+ }
+ if !bytes.HasPrefix(raw, []byte{0x41, 0x54, 0x26, 0x54, 0x46, 0x4F, 0x52, 0x4D}) {
+ return false
+ }
+ return bytes.HasPrefix(raw[12:], []byte("DJVM")) ||
+ bytes.HasPrefix(raw[12:], []byte("DJVU")) ||
+ bytes.HasPrefix(raw[12:], []byte("DJVI")) ||
+ bytes.HasPrefix(raw[12:], []byte("THUM"))
+}
+
+// P7s matches a .p7s signature file (PEM, Base64).
+func P7s(raw []byte, limit uint32) bool {
+ // Check for PEM Encoding.
+ if bytes.HasPrefix(raw, []byte("-----BEGIN PKCS7")) {
+ return true
+ }
+ // Check if DER Encoding is long enough.
+ if len(raw) < 20 {
+ return false
+ }
+ // Magic Bytes for the signedData ASN.1 encoding.
+ startHeader := [][]byte{{0x30, 0x80}, {0x30, 0x81}, {0x30, 0x82}, {0x30, 0x83}, {0x30, 0x84}}
+ signedDataMatch := []byte{0x06, 0x09, 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x07}
+ // Check if Header is correct. There are multiple valid headers.
+ for i, match := range startHeader {
+ // If first bytes match, then check for ASN.1 Object Type.
+ if bytes.HasPrefix(raw, match) {
+ if bytes.HasPrefix(raw[i+2:], signedDataMatch) {
+ return true
+ }
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/font.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/font.go
new file mode 100644
index 000000000..43af28212
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/font.go
@@ -0,0 +1,39 @@
+package magic
+
+import (
+ "bytes"
+)
+
+var (
+ // Woff matches a Web Open Font Format file.
+ Woff = prefix([]byte("wOFF"))
+ // Woff2 matches a Web Open Font Format version 2 file.
+ Woff2 = prefix([]byte("wOF2"))
+ // Otf matches an OpenType font file.
+ Otf = prefix([]byte{0x4F, 0x54, 0x54, 0x4F, 0x00})
+)
+
+// Ttf matches a TrueType font file.
+func Ttf(raw []byte, limit uint32) bool {
+ if !bytes.HasPrefix(raw, []byte{0x00, 0x01, 0x00, 0x00}) {
+ return false
+ }
+ return !MsAccessAce(raw, limit) && !MsAccessMdb(raw, limit)
+}
+
+// Eot matches an Embedded OpenType font file.
+func Eot(raw []byte, limit uint32) bool {
+ return len(raw) > 35 &&
+ bytes.Equal(raw[34:36], []byte{0x4C, 0x50}) &&
+ (bytes.Equal(raw[8:11], []byte{0x02, 0x00, 0x01}) ||
+ bytes.Equal(raw[8:11], []byte{0x01, 0x00, 0x00}) ||
+ bytes.Equal(raw[8:11], []byte{0x02, 0x00, 0x02}))
+}
+
+// Ttc matches a TrueType Collection font file.
+func Ttc(raw []byte, limit uint32) bool {
+ return len(raw) > 7 &&
+ bytes.HasPrefix(raw, []byte("ttcf")) &&
+ (bytes.Equal(raw[4:8], []byte{0x00, 0x01, 0x00, 0x00}) ||
+ bytes.Equal(raw[4:8], []byte{0x00, 0x02, 0x00, 0x00}))
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ftyp.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ftyp.go
new file mode 100644
index 000000000..6575b4aec
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ftyp.go
@@ -0,0 +1,88 @@
+package magic
+
+import "bytes"
+
+var (
+ // AVIF matches an AV1 Image File Format still or animated.
+ // Wikipedia page seems outdated listing image/avif-sequence for animations.
+ // https://github.com/AOMediaCodec/av1-avif/issues/59
+ AVIF = ftyp([]byte("avif"), []byte("avis"))
+ // Mp4 matches an MP4 file.
+ Mp4 = ftyp(
+ []byte("avc1"), []byte("dash"), []byte("iso2"), []byte("iso3"),
+ []byte("iso4"), []byte("iso5"), []byte("iso6"), []byte("isom"),
+ []byte("mmp4"), []byte("mp41"), []byte("mp42"), []byte("mp4v"),
+ []byte("mp71"), []byte("MSNV"), []byte("NDAS"), []byte("NDSC"),
+ []byte("NSDC"), []byte("NSDH"), []byte("NDSM"), []byte("NDSP"),
+ []byte("NDSS"), []byte("NDXC"), []byte("NDXH"), []byte("NDXM"),
+ []byte("NDXP"), []byte("NDXS"), []byte("F4V "), []byte("F4P "),
+ )
+ // ThreeGP matches a 3GPP file.
+ ThreeGP = ftyp(
+ []byte("3gp1"), []byte("3gp2"), []byte("3gp3"), []byte("3gp4"),
+ []byte("3gp5"), []byte("3gp6"), []byte("3gp7"), []byte("3gs7"),
+ []byte("3ge6"), []byte("3ge7"), []byte("3gg6"),
+ )
+ // ThreeG2 matches a 3GPP2 file.
+ ThreeG2 = ftyp(
+ []byte("3g24"), []byte("3g25"), []byte("3g26"), []byte("3g2a"),
+ []byte("3g2b"), []byte("3g2c"), []byte("KDDI"),
+ )
+ // AMp4 matches an audio MP4 file.
+ AMp4 = ftyp(
+ // audio for Adobe Flash Player 9+
+ []byte("F4A "), []byte("F4B "),
+ // Apple iTunes AAC-LC (.M4A) Audio
+ []byte("M4B "), []byte("M4P "),
+ // MPEG-4 (.MP4) for SonyPSP
+ []byte("MSNV"),
+ // Nero Digital AAC Audio
+ []byte("NDAS"),
+ )
+ // Mqv matches a Sony / Mobile QuickTime file.
+ Mqv = ftyp([]byte("mqt "))
+ // M4a matches an audio M4A file.
+ M4a = ftyp([]byte("M4A "))
+	// M4v matches an Apple M4V video file.
+ M4v = ftyp([]byte("M4V "), []byte("M4VH"), []byte("M4VP"))
+ // Heic matches a High Efficiency Image Coding (HEIC) file.
+ Heic = ftyp([]byte("heic"), []byte("heix"))
+ // HeicSequence matches a High Efficiency Image Coding (HEIC) file sequence.
+ HeicSequence = ftyp([]byte("hevc"), []byte("hevx"))
+ // Heif matches a High Efficiency Image File Format (HEIF) file.
+ Heif = ftyp([]byte("mif1"), []byte("heim"), []byte("heis"), []byte("avic"))
+ // HeifSequence matches a High Efficiency Image File Format (HEIF) file sequence.
+ HeifSequence = ftyp([]byte("msf1"), []byte("hevm"), []byte("hevs"), []byte("avcs"))
+ // TODO: add support for remaining video formats at ftyps.com.
+)
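+
+// For illustration, the first 12 bytes of a typical MP4 file look like
+//
+//	00 00 00 18 66 74 79 70 69 73 6F 6D    ....ftypisom
+//
+// i.e. a 4-byte box size, the literal "ftyp", then the 4-byte major brand
+// that the lists above are matched against (here "isom").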
+
+// QuickTime matches a QuickTime File Format file.
+// https://www.loc.gov/preservation/digital/formats/fdd/fdd000052.shtml
+// https://developer.apple.com/library/archive/documentation/QuickTime/QTFF/QTFFChap1/qtff1.html#//apple_ref/doc/uid/TP40000939-CH203-38190
+// https://github.com/apache/tika/blob/0f5570691133c75ac4472c3340354a6c4080b104/tika-core/src/main/resources/org/apache/tika/mime/tika-mimetypes.xml#L7758-L7777
+func QuickTime(raw []byte, _ uint32) bool {
+ if len(raw) < 12 {
+ return false
+ }
+ // First 4 bytes represent the size of the atom as unsigned int.
+ // Next 4 bytes are the type of the atom.
+ // For `ftyp` atoms, check that the first byte of the size is 0; otherwise a
+ // text file that happens to contain 'ftypqt ' at index 4 would trigger a
+ // false positive.
+ if bytes.Equal(raw[4:12], []byte("ftypqt ")) ||
+ bytes.Equal(raw[4:12], []byte("ftypmoov")) {
+ return raw[0] == 0x00
+ }
+ basicAtomTypes := [][]byte{
+ []byte("moov\x00"),
+ []byte("mdat\x00"),
+ []byte("free\x00"),
+ []byte("skip\x00"),
+ []byte("pnot\x00"),
+ }
+ for _, a := range basicAtomTypes {
+ if bytes.Equal(raw[4:9], a) {
+ return true
+ }
+ }
+ return bytes.Equal(raw[:8], []byte("\x00\x00\x00\x08wide"))
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/geo.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/geo.go
new file mode 100644
index 000000000..f077e1672
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/geo.go
@@ -0,0 +1,55 @@
+package magic
+
+import (
+ "bytes"
+ "encoding/binary"
+)
+
+// Shp matches a shape format file.
+// https://www.esri.com/library/whitepapers/pdfs/shapefile.pdf
+func Shp(raw []byte, limit uint32) bool {
+ if len(raw) < 112 {
+ return false
+ }
+
+ if !(binary.BigEndian.Uint32(raw[0:4]) == 9994 &&
+ binary.BigEndian.Uint32(raw[4:8]) == 0 &&
+ binary.BigEndian.Uint32(raw[8:12]) == 0 &&
+ binary.BigEndian.Uint32(raw[12:16]) == 0 &&
+ binary.BigEndian.Uint32(raw[16:20]) == 0 &&
+ binary.BigEndian.Uint32(raw[20:24]) == 0 &&
+ binary.LittleEndian.Uint32(raw[28:32]) == 1000) {
+ return false
+ }
+
+ shapeTypes := []int{
+ 0, // Null shape
+ 1, // Point
+ 3, // Polyline
+ 5, // Polygon
+ 8, // MultiPoint
+ 11, // PointZ
+ 13, // PolylineZ
+ 15, // PolygonZ
+ 18, // MultiPointZ
+ 21, // PointM
+ 23, // PolylineM
+ 25, // PolygonM
+ 28, // MultiPointM
+ 31, // MultiPatch
+ }
+
+ for _, st := range shapeTypes {
+ if st == int(binary.LittleEndian.Uint32(raw[108:112])) {
+ return true
+ }
+ }
+
+ return false
+}
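+
+// shpExample is an illustrative sketch of the header layout checked above:
+// file code 9994 (big endian), version 1000 (little endian at byte 28) and a
+// known shape type at byte 108. The header is hand-built, not read from a
+// real file, and the function is not used by the package itself.
+func shpExample() bool {
+ hdr := make([]byte, 112)
+ binary.BigEndian.PutUint32(hdr[0:4], 9994) // file code
+ binary.LittleEndian.PutUint32(hdr[28:32], 1000) // version
+ binary.LittleEndian.PutUint32(hdr[108:112], 5) // shape type: Polygon
+ return Shp(hdr, 0) // true
+}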
+
+// Shx matches a shape index format file.
+// https://www.esri.com/library/whitepapers/pdfs/shapefile.pdf
+func Shx(raw []byte, limit uint32) bool {
+ return bytes.HasPrefix(raw, []byte{0x00, 0x00, 0x27, 0x0A})
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/image.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/image.go
new file mode 100644
index 000000000..0eb7e95f3
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/image.go
@@ -0,0 +1,110 @@
+package magic
+
+import "bytes"
+
+var (
+ // Png matches a Portable Network Graphics file.
+ // https://www.w3.org/TR/PNG/
+ Png = prefix([]byte{0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A})
+ // Apng matches an Animated Portable Network Graphics file.
+ // https://wiki.mozilla.org/APNG_Specification
+ Apng = offset([]byte("acTL"), 37)
+ // Jpg matches a Joint Photographic Experts Group file.
+ Jpg = prefix([]byte{0xFF, 0xD8, 0xFF})
+ // Jp2 matches a JPEG 2000 Image file (ISO 15444-1).
+ Jp2 = jpeg2k([]byte{0x6a, 0x70, 0x32, 0x20})
+ // Jpx matches a JPEG 2000 Image file (ISO 15444-2).
+ Jpx = jpeg2k([]byte{0x6a, 0x70, 0x78, 0x20})
+ // Jpm matches a JPEG 2000 Image file (ISO 15444-6).
+ Jpm = jpeg2k([]byte{0x6a, 0x70, 0x6D, 0x20})
+ // Gif matches a Graphics Interchange Format file.
+ Gif = prefix([]byte("GIF87a"), []byte("GIF89a"))
+ // Bmp matches a bitmap image file.
+ Bmp = prefix([]byte{0x42, 0x4D})
+ // Ps matches a PostScript file.
+ Ps = prefix([]byte("%!PS-Adobe-"))
+ // Psd matches a Photoshop Document file.
+ Psd = prefix([]byte("8BPS"))
+ // Ico matches an ICO file.
+ Ico = prefix([]byte{0x00, 0x00, 0x01, 0x00}, []byte{0x00, 0x00, 0x02, 0x00})
+ // Icns matches an ICNS (Apple Icon Image format) file.
+ Icns = prefix([]byte("icns"))
+ // Tiff matches a Tagged Image File Format file.
+ Tiff = prefix([]byte{0x49, 0x49, 0x2A, 0x00}, []byte{0x4D, 0x4D, 0x00, 0x2A})
+ // Bpg matches a Better Portable Graphics file.
+ Bpg = prefix([]byte{0x42, 0x50, 0x47, 0xFB})
+ // Xcf matches GIMP image data.
+ Xcf = prefix([]byte("gimp xcf"))
+ // Pat matches GIMP pattern data.
+ Pat = offset([]byte("GPAT"), 20)
+ // Gbr matches GIMP brush data.
+ Gbr = offset([]byte("GIMP"), 20)
+ // Hdr matches Radiance HDR image.
+ // https://web.archive.org/web/20060913152809/http://local.wasp.uwa.edu.au/~pbourke/dataformats/pic/
+ Hdr = prefix([]byte("#?RADIANCE\n"))
+ // Xpm matches X PixMap image data.
+ Xpm = prefix([]byte{0x2F, 0x2A, 0x20, 0x58, 0x50, 0x4D, 0x20, 0x2A, 0x2F})
+ // Jxs matches a JPEG XS coded image file (ISO/IEC 21122-3).
+ Jxs = prefix([]byte{0x00, 0x00, 0x00, 0x0C, 0x4A, 0x58, 0x53, 0x20, 0x0D, 0x0A, 0x87, 0x0A})
+ // Jxr matches a Microsoft HD Photo (JPEG XR) file.
+ Jxr = prefix([]byte{0x49, 0x49, 0xBC, 0x01})
+)
+
+func jpeg2k(sig []byte) Detector {
+ return func(raw []byte, _ uint32) bool {
+ if len(raw) < 24 {
+ return false
+ }
+
+ if !bytes.Equal(raw[4:8], []byte{0x6A, 0x50, 0x20, 0x20}) &&
+ !bytes.Equal(raw[4:8], []byte{0x6A, 0x50, 0x32, 0x20}) {
+ return false
+ }
+ return bytes.Equal(raw[20:24], sig)
+ }
+}
+
+// Webp matches a WebP file.
+func Webp(raw []byte, _ uint32) bool {
+ return len(raw) > 12 &&
+ bytes.Equal(raw[0:4], []byte("RIFF")) &&
+ bytes.Equal(raw[8:12], []byte{0x57, 0x45, 0x42, 0x50})
+}
+
+// Dwg matches a CAD drawing file.
+func Dwg(raw []byte, _ uint32) bool {
+ if len(raw) < 6 || raw[0] != 0x41 || raw[1] != 0x43 {
+ return false
+ }
+ dwgVersions := [][]byte{
+ {0x31, 0x2E, 0x34, 0x30},
+ {0x31, 0x2E, 0x35, 0x30},
+ {0x32, 0x2E, 0x31, 0x30},
+ {0x31, 0x30, 0x30, 0x32},
+ {0x31, 0x30, 0x30, 0x33},
+ {0x31, 0x30, 0x30, 0x34},
+ {0x31, 0x30, 0x30, 0x36},
+ {0x31, 0x30, 0x30, 0x39},
+ {0x31, 0x30, 0x31, 0x32},
+ {0x31, 0x30, 0x31, 0x34},
+ {0x31, 0x30, 0x31, 0x35},
+ {0x31, 0x30, 0x31, 0x38},
+ {0x31, 0x30, 0x32, 0x31},
+ {0x31, 0x30, 0x32, 0x34},
+ {0x31, 0x30, 0x33, 0x32},
+ }
+
+ for _, d := range dwgVersions {
+ if bytes.Equal(raw[2:6], d) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Jxl matches a JPEG XL image file.
+func Jxl(raw []byte, _ uint32) bool {
+ return bytes.HasPrefix(raw, []byte{0xFF, 0x0A}) ||
+ bytes.HasPrefix(raw, []byte("\x00\x00\x00\x0cJXL\x20\x0d\x0a\x87\x0a"))
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go
new file mode 100644
index 000000000..466058fbe
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go
@@ -0,0 +1,239 @@
+// Package magic holds the matching functions used to find MIME types.
+package magic
+
+import (
+ "bytes"
+ "fmt"
+)
+
+type (
+ // Detector receives the raw data of a file and returns whether the data
+ // meets any conditions. The limit parameter is an upper limit to the number
+ // of bytes received and is used to tell if the byte slice represents the
+ // whole file or just the header of a file: len(raw) < limit means the whole
+ // file was provided, len(raw) == limit means raw may be only the header.
+ Detector func(raw []byte, limit uint32) bool
+ xmlSig struct {
+ // the local name of the root tag
+ localName []byte
+ // the namespace of the XML document
+ xmlns []byte
+ }
+)
+
+// prefix creates a Detector which returns true if any of the provided signatures
+// is a prefix of the raw input.
+func prefix(sigs ...[]byte) Detector {
+ return func(raw []byte, limit uint32) bool {
+ for _, s := range sigs {
+ if bytes.HasPrefix(raw, s) {
+ return true
+ }
+ }
+ return false
+ }
+}
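+
+// exampleDetector shows how prefix composes a Detector; the "FOO\x00"
+// signature is made up purely for illustration, does not correspond to a real
+// file format, and is not wired into any detection logic.
+var exampleDetector = prefix([]byte("FOO\x00"))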
+
+// offset creates a Detector which returns true if the provided signature can be
+// found at offset in the raw input.
+func offset(sig []byte, offset int) Detector {
+ return func(raw []byte, limit uint32) bool {
+ return len(raw) > offset && bytes.HasPrefix(raw[offset:], sig)
+ }
+}
+
+// ciPrefix is like prefix but the check is case insensitive.
+func ciPrefix(sigs ...[]byte) Detector {
+ return func(raw []byte, limit uint32) bool {
+ for _, s := range sigs {
+ if ciCheck(s, raw) {
+ return true
+ }
+ }
+ return false
+ }
+}
+func ciCheck(sig, raw []byte) bool {
+ if len(raw) < len(sig)+1 {
+ return false
+ }
+ // perform case insensitive check
+ for i, b := range sig {
+ db := raw[i]
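+ // When the signature byte is an uppercase ASCII letter, clear bit 0x20 in
+ // the input byte: this maps 'a'..'z' onto 'A'..'Z' and makes the comparison
+ // below case insensitive.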
+ if 'A' <= b && b <= 'Z' {
+ db &= 0xDF
+ }
+ if b != db {
+ return false
+ }
+ }
+
+ return true
+}
+
+// xml creates a Detector which returns true if any of the provided XML signatures
+// matches the raw input.
+func xml(sigs ...xmlSig) Detector {
+ return func(raw []byte, limit uint32) bool {
+ raw = trimLWS(raw)
+ if len(raw) == 0 {
+ return false
+ }
+ for _, s := range sigs {
+ if xmlCheck(s, raw) {
+ return true
+ }
+ }
+ return false
+ }
+}
+func xmlCheck(sig xmlSig, raw []byte) bool {
+ raw = raw[:min(len(raw), 512)]
+
+ if len(sig.localName) == 0 {
+ return bytes.Index(raw, sig.xmlns) > 0
+ }
+ if len(sig.xmlns) == 0 {
+ return bytes.Index(raw, sig.localName) > 0
+ }
+
+ localNameIndex := bytes.Index(raw, sig.localName)
+ return localNameIndex != -1 && localNameIndex < bytes.Index(raw, sig.xmlns)
+}
+
+// markup creates a Detector which returns true if any of the HTML signatures
+// matches the raw input.
+func markup(sigs ...[]byte) Detector {
+ return func(raw []byte, limit uint32) bool {
+ if bytes.HasPrefix(raw, []byte{0xEF, 0xBB, 0xBF}) {
+ // We skip the UTF-8 BOM if present to ensure we correctly
+ // process any leading whitespace. The presence of the BOM
+ // is taken into account during charset detection in charset.go.
+ raw = trimLWS(raw[3:])
+ } else {
+ raw = trimLWS(raw)
+ }
+ if len(raw) == 0 {
+ return false
+ }
+ for _, s := range sigs {
+ if markupCheck(s, raw) {
+ return true
+ }
+ }
+ return false
+ }
+}
+func markupCheck(sig, raw []byte) bool {
+ if len(raw) < len(sig)+1 {
+ return false
+ }
+
+ // perform case insensitive check
+ for i, b := range sig {
+ db := raw[i]
+ if 'A' <= b && b <= 'Z' {
+ db &= 0xDF
+ }
+ if b != db {
+ return false
+ }
+ }
+ // Next byte must be space or right angle bracket.
+ if db := raw[len(sig)]; db != ' ' && db != '>' {
+ return false
+ }
+
+ return true
+}
+
+// ftyp creates a Detector which returns true if any of the FTYP signatures
+// matches the raw input.
+func ftyp(sigs ...[]byte) Detector {
+ return func(raw []byte, limit uint32) bool {
+ if len(raw) < 12 {
+ return false
+ }
+ for _, s := range sigs {
+ if bytes.Equal(raw[4:12], append([]byte("ftyp"), s...)) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+func newXMLSig(localName, xmlns string) xmlSig {
+ ret := xmlSig{xmlns: []byte(xmlns)}
+ if localName != "" {
+ ret.localName = []byte(fmt.Sprintf("<%s", localName))
+ }
+
+ return ret
+}
+
+// A valid shebang starts with the "#!" characters,
+// followed by any number of spaces,
+// followed by the path to the interpreter,
+// and, optionally, followed by the arguments for the interpreter.
+//
+// Ex:
+// #! /usr/bin/env php
+// /usr/bin/env is the interpreter, php is the first and only argument.
+func shebang(sigs ...[]byte) Detector {
+ return func(raw []byte, limit uint32) bool {
+ for _, s := range sigs {
+ if shebangCheck(s, firstLine(raw)) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+func shebangCheck(sig, raw []byte) bool {
+ if len(raw) < len(sig)+2 {
+ return false
+ }
+ if raw[0] != '#' || raw[1] != '!' {
+ return false
+ }
+
+ return bytes.Equal(trimLWS(trimRWS(raw[2:])), sig)
+}
+
+// trimLWS trims whitespace from the beginning of the input.
+func trimLWS(in []byte) []byte {
+ firstNonWS := 0
+ for ; firstNonWS < len(in) && isWS(in[firstNonWS]); firstNonWS++ {
+ }
+
+ return in[firstNonWS:]
+}
+
+// trimRWS trims whitespace from the end of the input.
+func trimRWS(in []byte) []byte {
+ lastNonWS := len(in) - 1
+ for ; lastNonWS > 0 && isWS(in[lastNonWS]); lastNonWS-- {
+ }
+
+ return in[:lastNonWS+1]
+}
+
+func firstLine(in []byte) []byte {
+ lineEnd := 0
+ for ; lineEnd < len(in) && in[lineEnd] != '\n'; lineEnd++ {
+ }
+
+ return in[:lineEnd]
+}
+
+func isWS(b byte) bool {
+ return b == '\t' || b == '\n' || b == '\x0c' || b == '\r' || b == ' '
+}
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ms_office.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ms_office.go
new file mode 100644
index 000000000..5964ce596
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ms_office.go
@@ -0,0 +1,225 @@
+package magic
+
+import (
+ "bytes"
+ "encoding/binary"
+)
+
+var (
+ xlsxSigFiles = []string{
+ "xl/worksheets/",
+ "xl/drawings/",
+ "xl/theme/",
+ "xl/_rels/",
+ "xl/styles.xml",
+ "xl/workbook.xml",
+ "xl/sharedStrings.xml",
+ }
+ docxSigFiles = []string{
+ "word/media/",
+ "word/_rels/document.xml.rels",
+ "word/document.xml",
+ "word/styles.xml",
+ "word/fontTable.xml",
+ "word/settings.xml",
+ "word/numbering.xml",
+ "word/header",
+ "word/footer",
+ }
+ pptxSigFiles = []string{
+ "ppt/slides/",
+ "ppt/media/",
+ "ppt/slideLayouts/",
+ "ppt/theme/",
+ "ppt/slideMasters/",
+ "ppt/tags/",
+ "ppt/notesMasters/",
+ "ppt/_rels/",
+ "ppt/handoutMasters/",
+ "ppt/notesSlides/",
+ "ppt/presentation.xml",
+ "ppt/tableStyles.xml",
+ "ppt/presProps.xml",
+ "ppt/viewProps.xml",
+ }
+)
+
+// Xlsx matches a Microsoft Excel 2007 file.
+func Xlsx(raw []byte, limit uint32) bool {
+ return zipContains(raw, xlsxSigFiles...)
+}
+
+// Docx matches a Microsoft Word 2007 file.
+func Docx(raw []byte, limit uint32) bool {
+ return zipContains(raw, docxSigFiles...)
+}
+
+// Pptx matches a Microsoft PowerPoint 2007 file.
+func Pptx(raw []byte, limit uint32) bool {
+ return zipContains(raw, pptxSigFiles...)
+}
+
+// Ole matches an Object Linking and Embedding (OLE) file.
+//
+// https://en.wikipedia.org/wiki/Object_Linking_and_Embedding
+func Ole(raw []byte, limit uint32) bool {
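+ // This 8-byte signature is shared by every Compound File Binary container,
+ // which is why the more specific detectors in this file (Doc, Xls, Ppt, Pub,
+ // Msg, Msi) inspect the root storage CLSID via matchOleClsid instead.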
+ return bytes.HasPrefix(raw, []byte{0xD0, 0xCF, 0x11, 0xE0, 0xA1, 0xB1, 0x1A, 0xE1})
+}
+
+// Aaf matches an Advanced Authoring Format file.
+// See: https://pyaaf.readthedocs.io/en/latest/about.html
+// See: https://en.wikipedia.org/wiki/Advanced_Authoring_Format
+func Aaf(raw []byte, limit uint32) bool {
+ if len(raw) < 31 {
+ return false
+ }
+ return bytes.HasPrefix(raw[8:], []byte{0x41, 0x41, 0x46, 0x42, 0x0D, 0x00, 0x4F, 0x4D}) &&
+ (raw[30] == 0x09 || raw[30] == 0x0C)
+}
+
+// Doc matches a Microsoft Word 97-2003 file.
+// See: https://github.com/decalage2/oletools/blob/412ee36ae45e70f42123e835871bac956d958461/oletools/common/clsid.py
+func Doc(raw []byte, _ uint32) bool {
+ clsids := [][]byte{
+ // Microsoft Word 97-2003 Document (Word.Document.8)
+ {0x06, 0x09, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46},
+ // Microsoft Word 6.0-7.0 Document (Word.Document.6)
+ {0x00, 0x09, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46},
+ // Microsoft Word Picture (Word.Picture.8)
+ {0x07, 0x09, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46},
+ }
+
+ for _, clsid := range clsids {
+ if matchOleClsid(raw, clsid) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Ppt matches a Microsoft PowerPoint 97-2003 file or a PowerPoint 95 presentation.
+func Ppt(raw []byte, limit uint32) bool {
+ // The root CLSID test is the safest way to identify an OLE-based document;
+ // however, the format often places the root CLSID at the end of the file.
+ if matchOleClsid(raw, []byte{
+ 0x10, 0x8d, 0x81, 0x64, 0x9b, 0x4f, 0xcf, 0x11,
+ 0x86, 0xea, 0x00, 0xaa, 0x00, 0xb9, 0x29, 0xe8,
+ }) || matchOleClsid(raw, []byte{
+ 0x70, 0xae, 0x7b, 0xea, 0x3b, 0xfb, 0xcd, 0x11,
+ 0xa9, 0x03, 0x00, 0xaa, 0x00, 0x51, 0x0e, 0xa3,
+ }) {
+ return true
+ }
+
+ lin := len(raw)
+ if lin < 520 {
+ return false
+ }
+ pptSubHeaders := [][]byte{
+ {0xA0, 0x46, 0x1D, 0xF0},
+ {0x00, 0x6E, 0x1E, 0xF0},
+ {0x0F, 0x00, 0xE8, 0x03},
+ }
+ for _, h := range pptSubHeaders {
+ if bytes.HasPrefix(raw[512:], h) {
+ return true
+ }
+ }
+
+ if bytes.HasPrefix(raw[512:], []byte{0xFD, 0xFF, 0xFF, 0xFF}) &&
+ raw[518] == 0x00 && raw[519] == 0x00 {
+ return true
+ }
+
+ return lin > 1152 && bytes.Contains(raw[1152:min(4096, lin)],
+ []byte("P\x00o\x00w\x00e\x00r\x00P\x00o\x00i\x00n\x00t\x00 D\x00o\x00c\x00u\x00m\x00e\x00n\x00t"))
+}
+
+// Xls matches a Microsoft Excel 97-2003 file.
+func Xls(raw []byte, limit uint32) bool {
+ // The root CLSID test is the safest way to identify an OLE-based document;
+ // however, the format often places the root CLSID at the end of the file.
+ if matchOleClsid(raw, []byte{
+ 0x10, 0x08, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+ }) || matchOleClsid(raw, []byte{
+ 0x20, 0x08, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+ }) {
+ return true
+ }
+
+ lin := len(raw)
+ if lin < 520 {
+ return false
+ }
+ xlsSubHeaders := [][]byte{
+ {0x09, 0x08, 0x10, 0x00, 0x00, 0x06, 0x05, 0x00},
+ {0xFD, 0xFF, 0xFF, 0xFF, 0x10},
+ {0xFD, 0xFF, 0xFF, 0xFF, 0x1F},
+ {0xFD, 0xFF, 0xFF, 0xFF, 0x22},
+ {0xFD, 0xFF, 0xFF, 0xFF, 0x23},
+ {0xFD, 0xFF, 0xFF, 0xFF, 0x28},
+ {0xFD, 0xFF, 0xFF, 0xFF, 0x29},
+ }
+ for _, h := range xlsSubHeaders {
+ if bytes.HasPrefix(raw[512:], h) {
+ return true
+ }
+ }
+
+ return lin > 1152 && bytes.Contains(raw[1152:min(4096, lin)],
+ []byte("W\x00k\x00s\x00S\x00S\x00W\x00o\x00r\x00k\x00B\x00o\x00o\x00k"))
+}
+
+// Pub matches a Microsoft Publisher file.
+func Pub(raw []byte, limit uint32) bool {
+ return matchOleClsid(raw, []byte{
+ 0x01, 0x12, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46,
+ })
+}
+
+// Msg matches a Microsoft Outlook email file.
+func Msg(raw []byte, limit uint32) bool {
+ return matchOleClsid(raw, []byte{
+ 0x0B, 0x0D, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46,
+ })
+}
+
+// Msi matches a Microsoft Windows Installer file.
+// http://fileformats.archiveteam.org/wiki/Microsoft_Compound_File
+func Msi(raw []byte, limit uint32) bool {
+ return matchOleClsid(raw, []byte{
+ 0x84, 0x10, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46,
+ })
+}
+
+// Helper to match by a specific CLSID of a compound file.
+//
+// http://fileformats.archiveteam.org/wiki/Microsoft_Compound_File
+func matchOleClsid(in []byte, clsid []byte) bool {
+ // Microsoft Compound files v3 have a sector length of 512, while v4 has 4096.
+ // Change sector offset depending on file version.
+ // https://www.loc.gov/preservation/digital/formats/fdd/fdd000392.shtml
+ sectorLength := 512
+ if len(in) < sectorLength {
+ return false
+ }
+ if in[26] == 0x04 && in[27] == 0x00 {
+ sectorLength = 4096
+ }
+
+ // SecID of first sector of the directory stream.
+ firstSecID := int(binary.LittleEndian.Uint32(in[48:52]))
+
+ // Expected offset of CLSID for root storage object.
+ clsidOffset := sectorLength*(1+firstSecID) + 80
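+ // For example, in a v3 file (512-byte sectors) whose directory stream starts
+ // at sector 0, the root directory entry begins at byte 512 and its CLSID at
+ // byte 512+80 = 592; the 80 skips the entry name and header fields.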
+
+ if len(in) <= clsidOffset+16 {
+ return false
+ }
+
+ return bytes.HasPrefix(in[clsidOffset:], clsid)
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ogg.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ogg.go
new file mode 100644
index 000000000..bb4cd781b
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ogg.go
@@ -0,0 +1,42 @@
+package magic
+
+import (
+ "bytes"
+)
+
+/*
+ NOTE:
+
+ In May 2003, two Internet RFCs were published relating to the format.
+ The Ogg bitstream was defined in RFC 3533 (which is classified as
+ 'informative') and its Internet content type (application/ogg) in RFC
+ 3534 (which is, as of 2006, a proposed standard protocol). In
+ September 2008, RFC 3534 was obsoleted by RFC 5334, which added
+ content types video/ogg, audio/ogg and filename extensions .ogx, .ogv,
+ .oga, .spx.
+
+ See:
+ https://tools.ietf.org/html/rfc3533
+ https://developer.mozilla.org/en-US/docs/Web/HTTP/Configuring_servers_for_Ogg_media#Serve_media_with_the_correct_MIME_type
+ https://github.com/file/file/blob/master/magic/Magdir/vorbis
+*/
+
+// Ogg matches an Ogg file.
+func Ogg(raw []byte, limit uint32) bool {
+ return bytes.HasPrefix(raw, []byte("\x4F\x67\x67\x53\x00"))
+}
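+
+// The codec identification headers checked below start at byte 28: an Ogg
+// page begins with a 27-byte header followed by a segment table, and the
+// first (BOS) page normally carries a single one-byte segment entry, so its
+// payload starts at offset 28.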
+
+// OggAudio matches an Ogg audio file.
+func OggAudio(raw []byte, limit uint32) bool {
+ return len(raw) >= 37 && (bytes.HasPrefix(raw[28:], []byte("\x7fFLAC")) ||
+ bytes.HasPrefix(raw[28:], []byte("\x01vorbis")) ||
+ bytes.HasPrefix(raw[28:], []byte("OpusHead")) ||
+ bytes.HasPrefix(raw[28:], []byte("Speex\x20\x20\x20")))
+}
+
+// OggVideo matches an Ogg video file.
+func OggVideo(raw []byte, limit uint32) bool {
+ return len(raw) >= 37 && (bytes.HasPrefix(raw[28:], []byte("\x80theora")) ||
+ bytes.HasPrefix(raw[28:], []byte("fishead\x00")) ||
+ bytes.HasPrefix(raw[28:], []byte("\x01video\x00\x00\x00"))) // OGM video
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go
new file mode 100644
index 000000000..e2a03caf5
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go
@@ -0,0 +1,375 @@
+package magic
+
+import (
+ "bufio"
+ "bytes"
+ "strings"
+ "time"
+
+ "github.com/gabriel-vasile/mimetype/internal/charset"
+ "github.com/gabriel-vasile/mimetype/internal/json"
+)
+
+var (
+ // HTML matches a Hypertext Markup Language file.
+ HTML = markup(
+ []byte(" 0
+}
+
+// GeoJSON matches an RFC 7946 GeoJSON file.
+//
+// GeoJSON detection involves searching the input for key:value pairs like
+// `"type": "Feature"`.
+// BUG(gabriel-vasile): The "type" key should be searched for in the root object.
+func GeoJSON(raw []byte, limit uint32) bool {
+ raw = trimLWS(raw)
+ if len(raw) == 0 {
+ return false
+ }
+ // GeoJSON is always a JSON object, not a JSON array or any other JSON value.
+ if raw[0] != '{' {
+ return false
+ }
+
+ s := []byte(`"type"`)
+ si, sl := bytes.Index(raw, s), len(s)
+
+ if si == -1 {
+ return false
+ }
+
+ // If the "type" string is the suffix of the input,
+ // there is no need to search for the value of the key.
+ if si+sl == len(raw) {
+ return false
+ }
+ // Skip the "type" part.
+ raw = raw[si+sl:]
+ // Skip any whitespace before the colon.
+ raw = trimLWS(raw)
+ // Check for colon.
+ if len(raw) == 0 || raw[0] != ':' {
+ return false
+ }
+ // Skip any whitespace after the colon.
+ raw = trimLWS(raw[1:])
+
+ geoJSONTypes := [][]byte{
+ []byte(`"Feature"`),
+ []byte(`"FeatureCollection"`),
+ []byte(`"Point"`),
+ []byte(`"LineString"`),
+ []byte(`"Polygon"`),
+ []byte(`"MultiPoint"`),
+ []byte(`"MultiLineString"`),
+ []byte(`"MultiPolygon"`),
+ []byte(`"GeometryCollection"`),
+ }
+ for _, t := range geoJSONTypes {
+ if bytes.HasPrefix(raw, t) {
+ return true
+ }
+ }
+
+ return false
+}
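+
+// For illustration, a minimal document such as
+//
+//	{"type": "FeatureCollection", "features": []}
+//
+// is reported as GeoJSON by the check above, because the value of its "type"
+// key is one of the known GeoJSON types.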
+
+// NdJSON matches a Newline Delimited JSON file. Every complete line in raw
+// must be a valid JSON document, i.e. it must contain one of the valid JSON
+// data types.
+func NdJSON(raw []byte, limit uint32) bool {
+ lCount, hasObjOrArr := 0, false
+ sc := bufio.NewScanner(dropLastLine(raw, limit))
+ for sc.Scan() {
+ l := sc.Bytes()
+ // Empty lines are allowed in NDJSON.
+ if l = trimRWS(trimLWS(l)); len(l) == 0 {
+ continue
+ }
+ _, err := json.Scan(l)
+ if err != nil {
+ return false
+ }
+ if l[0] == '[' || l[0] == '{' {
+ hasObjOrArr = true
+ }
+ lCount++
+ }
+
+ return lCount > 1 && hasObjOrArr
+}
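+
+// For illustration, input such as
+//
+//	{"id": 1}
+//	{"id": 2}
+//
+// satisfies the check above: at least two complete lines, each one a valid
+// JSON value, and at least one of them a JSON object or array.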
+
+// HAR matches an HTTP Archive (HAR) file.
+// Spec: http://www.softwareishard.com/blog/har-12-spec/
+func HAR(raw []byte, limit uint32) bool {
+ s := []byte(`"log"`)
+ si, sl := bytes.Index(raw, s), len(s)
+
+ if si == -1 {
+ return false
+ }
+
+ // If the "log" string is the suffix of the input,
+ // there is no need to search for the value of the key.
+ if si+sl == len(raw) {
+ return false
+ }
+ // Skip the "log" part.
+ raw = raw[si+sl:]
+ // Skip any whitespace before the colon.
+ raw = trimLWS(raw)
+ // Check for colon.
+ if len(raw) == 0 || raw[0] != ':' {
+ return false
+ }
+ // Skip any whitespace after the colon.
+ raw = trimLWS(raw[1:])
+
+ harJSONTypes := [][]byte{
+ []byte(`"version"`),
+ []byte(`"creator"`),
+ []byte(`"entries"`),
+ }
+ for _, t := range harJSONTypes {
+ si := bytes.Index(raw, t)
+ if si > -1 {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Svg matches an SVG file.
+func Svg(raw []byte, limit uint32) bool {
+ return bytes.Contains(raw, []byte("